Example #1
    def _calc_pretrained_class_mean(self, normalize=False):
        params = self.params
        if params.dataset == "miniImagenet":
            dataset = "miniImageNet"
        else:
            dataset = params.dataset
        trainTransform, valTransform, inputW, inputH, \
            trainDir, valDir, testDir, episodeJson, nbCls = dataset_setting(dataset, 1, self.image_size)
        # use the deterministic val transform so the class means see no augmentation
        base_loader = TrainLoader(self.batch_size, trainDir, valTransform)

        # running per-class feature sums and sample counts
        features = np.zeros((self.num_classes, self.feat_dim))
        counts = np.zeros(self.num_classes)
        print("saving pretrained mean")
        for epoch in range(0, 1):  # single pass over the base set
            for i, (x, y) in enumerate(base_loader):
                x = Variable(x.cuda())
                out = self.get_features(x)
                if normalize:
                    out = self.normalize(out)
                # move the batch to numpy once, then accumulate per class
                np_out = out.data.cpu().numpy()
                np_y = y.numpy()
                for j in range(out.shape[0]):
                    features[np_y[j]] += np_out[j]
                    counts[np_y[j]] += 1
                print_with_carriage_return("Epoch %d: %d/%d processed" %
                                           (epoch, i, len(base_loader)))
            end_carriage_return_print()
            # print(np.max(counts[64:]))
            print(np.max(features))
        # convert the accumulated sums into per-class mean features
        for i in range(0, len(counts)):
            if counts[i] != 0:
                features[i] = features[i] / counts[i]
        return features
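A minimal sketch (not part of the original code) of how the per-class means returned above could be consumed for nearest-class-mean classification; `nearest_class_mean` and its inputs are illustrative assumptions.

import numpy as np

def nearest_class_mean(query_feats, class_means):
    # hypothetical helper, not from the original repo: L2-normalize both
    # sides so the dot product below is cosine similarity (the epsilon
    # guards classes whose mean stayed all-zero)
    q = query_feats / (np.linalg.norm(query_feats, axis=1, keepdims=True) + 1e-8)
    m = class_means / (np.linalg.norm(class_means, axis=1, keepdims=True) + 1e-8)
    sims = q @ m.T                    # (n_query, num_classes) similarities
    return sims.argmax(axis=1)        # predicted class index per query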
Example #2
    def train_d_specific_classifiers(self, n_clf):
        # one linear classifier per equal-width chunk of the feature vector
        in_dim = int(self.feat_dim / n_clf)
        out_dim = self.num_classes
        self.clfs = nn.ModuleList(
            [nn.Linear(in_dim, out_dim) for i in range(n_clf)])
        self.clfs = self.clfs.cuda()
        # self.load_classifier_weights(n_clf, 12)
        if self.params.dataset == "miniImagenet":
            dataset = "miniImageNet"
        else:
            dataset = self.params.dataset
        trainTransform, valTransform, inputW, inputH, \
            trainDir, valDir, testDir, episodeJson, nbCls = dataset_setting(dataset, 1, self.image_size)
        base_loader = TrainLoader(self.batch_size, trainDir, trainTransform)
        loss_fn = nn.CrossEntropyLoss()
        params = self.clfs.parameters()
        optimizer = torch.optim.Adam(params)
        for epoch in range(0, 25):
            for i, (x, y) in enumerate(base_loader):
                optimizer.zero_grad()
                x = Variable(x.cuda())
                out = self.get_features(x)
                y = y.cuda()
                avg_loss = 0
                for j in range(n_clf):
                    # each classifier only sees its own slice of the features
                    start = in_dim * j
                    stop = start + in_dim
                    scores = self.clfs[j](out[:, start:stop])
                    loss = loss_fn(scores, y)
                    # retain_graph: the shared feature graph is reused by the
                    # remaining classifiers' backward passes in this loop
                    loss.backward(retain_graph=True)
                    avg_loss += loss.item()
                optimizer.step()
                if i % 10 == 0:
                    print("Epoch: %d, Batch %d/%d, Loss=%.3f" %
                          (epoch, i, len(base_loader), avg_loss / n_clf))
            # checkpoint the classifiers after every epoch
            out_dir = "pretrain/clfs/%s_%s_%s_%d" % (
                self.method, self.model_name, self.base_dataset, n_clf)
            if not os.path.isdir(out_dir):
                os.makedirs(out_dir)
            outfile = os.path.join(out_dir, "%d.tar" % (epoch))
            torch.save(self.clfs.state_dict(), outfile)
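The method above references a commented-out `self.load_classifier_weights(n_clf, 12)`; here is a hedged sketch of what that loader could look like. Only the path layout is taken from the save code; everything else (the standalone signature, the `model` argument standing in for `self`) is an assumption.

import os
import torch

def load_classifier_weights(model, n_clf, epoch):
    # hypothetical counterpart to the save code above; assumes `model`
    # already owns a `clfs` ModuleList with matching shapes
    out_dir = "pretrain/clfs/%s_%s_%s_%d" % (
        model.method, model.model_name, model.base_dataset, n_clf)
    state = torch.load(os.path.join(out_dir, "%d.tar" % epoch))
    model.clfs.load_state_dict(state)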
Example #3
# Validation
parser.add_argument('--nFeat', type=int, default=640, help='feature dimension')

args = parser.parse_args()
print(args)

#############################################################################################
## datasets
trainTransform, valTransform, inputW, inputH, \
        trainDir, valDir, testDir, episodeJson, nbCls = \
        dataset_setting(args.dataset, 1)

args.inputW = inputW
args.inputH = inputH

trainLoader = TrainLoader(args.batchSize, trainDir, trainTransform)
valLoader = ValLoader(episodeJson, valDir, inputW, inputH, valTransform,
                      args.cuda)

with open(episodeJson, 'r') as f:
    episodeInfo = json.load(f)

args.nClsEpisode = len(episodeInfo[0]['Support'])
args.nSupport = len(episodeInfo[0]['Support'][0])
args.nQuery = len(episodeInfo[0]['Query'][0])
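For reference, a hypothetical episode entry consistent with the indexing above (a 2-way, 2-shot, 1-query example; the file names are invented): `Support` is a list of per-class support lists and `Query` a list of per-class query lists.

example_episode = {
    'Support': [['n01/a.jpg', 'n01/b.jpg'],   # class 0: nSupport entries
                ['n02/c.jpg', 'n02/d.jpg']],  # class 1
    'Query':   [['n01/e.jpg'],                # class 0: nQuery entries
                ['n02/f.jpg']],
}
assert len(example_episode['Support']) == 2      # -> args.nClsEpisode
assert len(example_episode['Support'][0]) == 2   # -> args.nSupport
assert len(example_episode['Query'][0]) == 1     # -> args.nQuery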

#############################################################################################
## model

#milestones=[50, 80, 100]
milestones = [100] if args.dataset == 'CUB' else [50, 80, 100]
Example #4
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

# Load the train and validation data using separate data loaders.
data_dir = 'SunData'
image_datasets = {}
image_datasets['train'] = TrainLoader(os.path.join(data_dir, 'train'), data_transforms['train'])
image_datasets['val'] = TestLoader(os.path.join(data_dir, 'val'), data_transforms['val'])
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=2,
                                              shuffle=True, num_workers=4)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
print('dataset_sizes', dataset_sizes)
class_names = image_datasets['train'].classes

print("Data loaded")

class Flatten(nn.Module):
    def forward(self, x):
        # collapse (N, C, H, W) feature maps into (N, C*H*W) vectors
        N, C, H, W = x.size()
        return x.view(N, -1)
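As a usage note, Flatten is typically dropped between a convolutional backbone and a linear head; the layers and sizes below are illustrative, not taken from this script.

import torch.nn as nn

head = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),  # toy backbone (assumption)
    nn.ReLU(),
    nn.AdaptiveAvgPool2d((4, 4)),
    Flatten(),                      # (N, 16, 4, 4) -> (N, 256)
    nn.Linear(16 * 4 * 4, 10),      # 10 = illustrative class count
)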
Example #5
normalize = transforms.Normalize(
    [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # ImageNet normalization
trainTransform = transforms.Compose([
    transforms.RandomResizedCrop(args.imgSize),
    transforms.ToTensor(),
    normalize,
])

valTransform = transforms.Compose([
    transforms.Resize(args.imgSize),
    transforms.CenterCrop(args.imgSize),
    transforms.ToTensor(),
    normalize,
])

trainLoader = TrainLoader(batchSize=args.batchSize,
                          pairCSV=args.trainCSV,
                          imgDir=args.imgDir,
                          trainTransform=trainTransform)

valLoader = ValLoader(batchSize=args.batchSize,
                      pairCSV=args.valCSV,
                      imgDir=args.imgDir,
                      valTransform=valTransform)

if not os.path.exists(args.outDir):
    os.mkdir(args.outDir)

# Train
bestValLoss = np.inf
history = {'TrainLoss': [], 'ValLoss': []}
outHistory = os.path.join(args.outDir, 'history.json')
outModel = os.path.join(args.outDir, 'netBest.pth')
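The fragment stops just before the loop these variables set up; below is a minimal sketch of that loop, where `net`, `args.nEpochs`, and the `train_epoch`/`validate` helpers are assumptions rather than this script's actual API.

for epoch in range(args.nEpochs):               # nEpochs is an assumption
    trainLoss = train_epoch(net, trainLoader)   # hypothetical helper
    valLoss = validate(net, valLoader)          # hypothetical helper
    history['TrainLoss'].append(trainLoss)
    history['ValLoss'].append(valLoss)
    if valLoss < bestValLoss:                   # checkpoint on improvement
        bestValLoss = valLoss
        torch.save(net.state_dict(), outModel)
    with open(outHistory, 'w') as f:            # persist the loss curves
        json.dump(history, f)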