Example #1
    def _calc_pretrained_class_mean(self, normalize=False):
        params = self.params
        if params.dataset == "miniImagenet":
            dataset = "miniImageNet"
        else:
            dataset = params.dataset
        trainTransform, valTransform, inputW, inputH, \
            trainDir, valDir, testDir, episodeJson, nbCls = dataset_setting(dataset, 1, self.image_size)
        # class means are accumulated with the validation transform rather than trainTransform
        base_loader = TrainLoader(self.batch_size, trainDir, valTransform)

        features = np.zeros((self.num_classes, self.feat_dim))
        counts = np.zeros(self.num_classes)
        print("saving pretrained mean")
        for epoch in range(0, 1):
            for i, (x, y) in enumerate(base_loader):
                x = Variable(x.cuda())
                out = self.get_features(x)
                if normalize:
                    out = self.normalize(out)
                for j in range(out.shape[0]):
                    np_out = out.data.cpu().numpy()[j]
                    np_y = y.numpy()[j]
                    features[np_y] += np_out
                    counts[np_y] += 1
                print_with_carriage_return("Epoch %d: %d/%d processed" %
                                           (epoch, i, len(base_loader)))
            end_carriage_return_print()
            # print(np.max(counts[64:]))
            print(np.max(features))
        for i in range(0, len(counts)):
            if counts[i] != 0:
                features[i] = features[i] / counts[i]
        return features
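The class means returned by _calc_pretrained_class_mean can serve directly as a nearest-class-mean classifier. Below is a minimal sketch of that use, assuming query features have already been extracted; the names nearest_class_mean, query_feats and class_means are illustrative and not part of the original code:

import numpy as np

def nearest_class_mean(query_feats, class_means):
    # query_feats: (n_query, feat_dim), class_means: (num_classes, feat_dim)
    # L2-normalize both so the dot product becomes a cosine similarity
    q = query_feats / np.linalg.norm(query_feats, axis=1, keepdims=True)
    m = class_means / np.linalg.norm(class_means, axis=1, keepdims=True)
    scores = q @ m.T                  # (n_query, num_classes) similarities
    return scores.argmax(axis=1)      # predicted class index per query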
Example #2
    def train_d_specific_classifiers(self, n_clf):
        # split the feature vector into n_clf equal chunks, one linear classifier per chunk
        in_dim = int(self.feat_dim / n_clf)
        out_dim = self.num_classes
        self.clfs = nn.ModuleList(
            [nn.Linear(in_dim, out_dim) for i in range(n_clf)])
        self.clfs = self.clfs.cuda()
        # self.load_classifier_weights(n_clf, 12)
        if self.params.dataset == "miniImagenet":
            dataset = "miniImageNet"
        else:
            dataset = self.params.dataset
        trainTransform, valTransform, inputW, inputH, \
            trainDir, valDir, testDir, episodeJson, nbCls = dataset_setting(dataset, 1, self.image_size)
        base_loader = TrainLoader(self.batch_size, trainDir, trainTransform)
        loss_fn = nn.CrossEntropyLoss()
        params = self.clfs.parameters()
        optimizer = torch.optim.Adam(params)
        for epoch in range(0, 25):
            for i, (x, y) in enumerate(base_loader):
                optimizer.zero_grad()
                x = Variable(x.cuda())
                out = self.get_features(x)
                y = y.cuda()
                avg_loss = 0
                for j in range(n_clf):
                    # each classifier sees only its own slice of the feature vector
                    start = in_dim * j
                    stop = start + in_dim
                    scores = self.clfs[j](out[:, start:stop])
                    loss = loss_fn(scores, y)
                    loss.backward(retain_graph=True)
                    avg_loss += loss.item()
                optimizer.step()
                if i % 10 == 0:
                    print("Epoch: %d, Batch %d/%d, Loss=%.3f" %
                          (epoch, i, len(base_loader), avg_loss / n_clf))
            # save model once per epoch
            out_dir = "pretrain/clfs/%s_%s_%s_%d" % (
                self.method, self.model_name, self.base_dataset, n_clf)
            if not os.path.isdir(out_dir):
                os.makedirs(out_dir)
            outfile = os.path.join(out_dir, "%d.tar" % (epoch))
            torch.save(self.clfs.state_dict(), outfile)
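At test time the chunk-specific classifiers trained above can be combined by splitting a feature vector the same way and averaging the per-chunk logits. A minimal sketch under that assumption; ensemble_scores is an illustrative helper name, with clfs being the trained nn.ModuleList and feats a batch of extracted features:

import torch

def ensemble_scores(clfs, feats):
    # feats: (batch, feat_dim); each classifier scores one contiguous chunk
    n_clf = len(clfs)
    in_dim = feats.shape[1] // n_clf
    with torch.no_grad():
        scores = 0
        for j in range(n_clf):
            chunk = feats[:, j * in_dim:(j + 1) * in_dim]
            scores = scores + clfs[j](chunk)   # (batch, num_classes) logits
    return scores / n_clf                      # average over the ensemble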
Example #3
# Fix random seed to reproduce results
set_random_seed(args.seed)
logger.info('Start experiment with random seed: {:d}'.format(args.seed))
logger.info(args)

# GPU setup
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
if args.gpu != '':
    args.cuda = True
device = torch.device('cuda' if args.cuda else 'cpu')

#############################################################################################
## Datasets
trainTransform, valTransform, inputW, inputH, \
        trainDir, valDir, testDir, episodeJson, nbCls = \
        dataset_setting(args.dataset, args.nSupport)

trainLoader = BatchSampler(imgDir = trainDir,
                           nClsEpisode = args.nClsEpisode,
                           nSupport = args.nSupport,
                           nQuery = args.nQuery,
                           transform = trainTransform,
                           useGPU = args.cuda,
                           inputW = inputW,
                           inputH = inputH,
                           batchSize = args.batchSize)

valLoader = ValLoader(episodeJson,
                      valDir,
                      inputW,
                      inputH,
                      valTransform,
                      args.cuda)
Example #4
# Lr WarmUp
parser.add_argument('--totalIter',
                    type=int,
                    default=6000,
                    help='total iterations for learning rate warm-up')
# Validation
parser.add_argument('--nFeat', type=int, default=640, help='feature dimension')

args = parser.parse_args()
print(args)

#############################################################################################
## datasets
trainTransform, valTransform, inputW, inputH, \
        trainDir, valDir, testDir, episodeJson, nbCls = \
        dataset_setting(args.dataset, 1)

args.inputW = inputW
args.inputH = inputH

trainLoader = TrainLoader(args.batchSize, trainDir, trainTransform)
valLoader = ValLoader(episodeJson, valDir, inputW, inputH, valTransform,
                      args.cuda)

with open(episodeJson, 'r') as f:
    episodeInfo = json.load(f)

args.nClsEpisode = len(episodeInfo[0]['Support'])
args.nSupport = len(episodeInfo[0]['Support'][0])
args.nQuery = len(episodeInfo[0]['Query'][0])
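The three len() calls above imply a particular layout for the episode JSON: a list of episodes, each with a 'Support' and a 'Query' field holding one inner list per class. A sketch of that assumed structure (file names are placeholders, not taken from the original data):

episodeInfo = [
    {
        "Support": [                   # nClsEpisode inner lists, one per class
            ["class0_img0.jpg"],       # nSupport entries for the first class
            ["class1_img0.jpg"],
        ],
        "Query": [                     # same class order as 'Support'
            ["class0_img1.jpg", "class0_img2.jpg"],   # nQuery entries per class
            ["class1_img1.jpg", "class1_img2.jpg"],
        ],
    },
    # one dict per validation episode
]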
Example #5
# Setup logging to file and stdout
logger = get_logger(args.logDir, args.expName)

logger.info(args)

# GPU setup
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
if args.gpu != '':
    args.cuda = True
device = torch.device('cuda' if args.cuda else 'cpu')

#############################################################################################
## Datasets
trainTransform, valTransform, inputW, inputH, \
            trainDir, valDir, testDir, episodeJson, nbCls = \
            dataset_setting(args.dataset, args.nSupport, args.image_size)

trainLoader = BatchSampler(imgDir=trainDir,
                           nClsEpisode=args.nClsEpisode,
                           nSupport=args.nSupport,
                           nQuery=args.nQuery,
                           transform=trainTransform,
                           useGPU=args.cuda,
                           inputW=inputW,
                           inputH=inputH,
                           batchSize=args.batchSize)
'''
valLoader = ValLoader(episodeJson,
                      valDir,
                      inputW,
                      inputH,
                      valTransform,
                      args.cuda)
'''