def extract_model_feature_ww(model, data, data_dir, params):
    """Extract per-image features and labels for `data` as numpy arrays.

    Returns (features, labels), both ordered as the OrderedDicts from
    extract_features_w yield them (insertion order is preserved).
    """
    loader = dp.get_dataloader(data, data_dir, **params)
    feats, lbls = extract_features_w(model, loader)
    # Stack the per-image tensors / label values into dense arrays.
    feature_arr = np.array([t.numpy() for t in feats.values()])
    label_arr = np.array(list(lbls.values()))
    return feature_arr, label_arr
def get_feature(model, data, data_dir, config):
    """Return raw features extracted from `data` (config-style loader API)."""
    config.set_training(False)  # ensure the loader is built in eval mode
    loader = dp.get_dataloader(data, data_dir, config)
    feats, _ = extract_features(model, loader)
    return feats
def pre_from_feature_w(model, data, data_dir, params):
    """Predict class ids from extracted features.

    Returns (pred, labels): the argmax class index per sample and the
    ground-truth labels, both as numpy arrays.
    """
    loader = dp.get_dataloader(data, data_dir, **params)
    feats, lbls = extract_features_w(model, loader)
    logits = np.array([t.numpy() for t in feats.values()])
    predictions = logits.argmax(axis=1)
    truth = np.array(list(lbls.values()))
    return predictions, truth
def train_w(model, model_name, train_data, data_dir, num_classes, epochs=50):
    """Train `model` in place on `train_data` and return it.

    NOTE(review): num_classes is accepted but never used here —
    presumably the model was already built with it; confirm with callers.
    """
    loader_params = get_params_by_name(model_name)
    loader = dp.get_dataloader(train_data, data_dir, training=True,
                               **loader_params)
    train_model(model, loader, epochs=epochs)
    return model
def evaluate(model, dataset, params, metric=None):
    """Evaluate `model` on a query/gallery retrieval dataset.

    Args:
        model: trained network to evaluate.
        dataset: object exposing .query, .gallery and .images_dir.
        params: keyword arguments forwarded to the dataloader.
        metric: optional pre-built DistanceMetric.  The original code
            accepted this parameter but always overwrote it with a
            Euclidean metric; it is now honored when provided (a
            Euclidean one is still created when None, so existing
            callers are unaffected).
    """
    query, gallery = dataset.query, dataset.gallery
    dataloader = dp.get_dataloader(
        list(set(query) | set(gallery)), dataset.images_dir, **params)
    if metric is None:
        metric = DistanceMetric(algorithm='euclidean')
    metric.train(model, dataloader)
    evaluator = Evaluator(model)
    evaluator.evaluate(dataloader, query, gallery, metric)
def train(net, train_data, data_dir, config, device):
    """Train `net` in place on `train_data` and return it."""
    config.set_training(True)  # switch config/loader into training mode
    loader = dp.get_dataloader(train_data, data_dir, config)
    train_model(net, loader, config, device)
    return net
def train(model_name, train_data, data_dir, num_classes, epochs=50):
    """Build a model by name, wrap it for multi-GPU, train it, return it."""
    net = get_model_by_name(model_name, num_classes)
    net = nn.DataParallel(net).cuda()  # replicate across available GPUs
    loader_params = get_params_by_name(model_name)
    loader = dp.get_dataloader(train_data, data_dir, training=True,
                               **loader_params)
    train_model(net, loader, epochs=epochs)
    return net
def train(train_data, data_dir, config):
    """Create the configured model, train it, and return the trained net."""
    config.set_training(True)
    net = models.create(config.model_name,
                        num_features=config.num_features,
                        dropout=config.dropout,
                        num_classes=config.num_classes)
    # DataParallel wrapper also moves the replicas onto the GPUs.
    net = nn.DataParallel(net).cuda()
    loader = dp.get_dataloader(train_data, data_dir, config)
    train_model(net, loader, config)
    return net
def pre_from_feature_ww(model, data, data_dir, params):
    """Predict classes and softmax probabilities from extracted features.

    Returns (pred, labels, probs):
        pred   -- argmax class index per sample,
        labels -- ground-truth labels,
        probs  -- numerically-stable softmax of the logits.
    """
    loader = dp.get_dataloader(data, data_dir, **params)
    feats, lbls = extract_features_w(model, loader)
    logits = np.array([t.numpy() for t in feats.values()])
    pred = logits.argmax(axis=1)
    truth = np.array(list(lbls.values()))
    # Stable softmax: shift by the per-row max before exponentiating.
    shifted = logits - logits.max(axis=1, keepdims=True)
    exp = np.exp(shifted)
    probs = exp / exp.sum(axis=1, keepdims=True)
    return pred, truth, probs
def extract_label(data, data_dir, params):
    """Collect the pid label of every image in `data`, keyed by filename.

    Returns the labels as a numpy array in loader (insertion) order;
    duplicate filenames keep their last-seen pid, as in a plain dict fill.
    """
    loader = dp.get_dataloader(data, data_dir, **params)
    labels = OrderedDict()
    for _imgs, fnames, pids, _ in loader:
        labels.update(zip(fnames, pids))
    return np.array(list(labels.values()))
def predict_prob(model, data, data_dir, config):
    """Return class probabilities (softmax over model outputs) for `data`.

    Runs the model in eval mode with autograd disabled and concatenates
    the per-batch probabilities into one (N, num_classes) numpy array.

    Fix: the original wrapped inputs in `Variable(..., volatile=True)`.
    `volatile` was removed in PyTorch >= 0.4 and is silently ignored, so
    inference was tracking gradients (wasting memory).  torch.no_grad()
    is the supported replacement.
    """
    import torch  # local import so this fix is self-contained

    config.set_training(False)
    model.eval()
    dataloader = dp.get_dataloader(data, data_dir, config)
    probs = []
    with torch.no_grad():
        for imgs, _, _, _, _ in dataloader:
            inputs = to_torch(imgs)
            output = model(inputs)
            prob = nn.functional.softmax(output, dim=1)
            probs.append(prob.cpu().numpy())
    return np.concatenate(probs)
def evaluate(model, dataset, config):
    """Evaluate on the query/gallery split with the configured distance metric."""
    config.set_training(False)
    query, gallery = dataset.query, dataset.gallery
    combined = list(set(query) | set(gallery))  # each image loaded once
    loader = dp.get_dataloader(combined, dataset.images_dir, config)
    metric = DistanceMetric(algorithm=config.dist_metric)
    metric.train(model, loader)
    Evaluator(model).evaluate(loader, query, gallery, metric,
                              print_freq=config.batch_size)
def train_wxp(model, testdata, model_name, train_data, data_dir,
              num_classes, epochs=50, weight=None):
    """Train `model` with optional sample weights, validating on `testdata`.

    NOTE(review): num_classes is accepted but unused here — confirm
    against callers before removing.
    """
    loader_params = get_params_by_name(model_name)
    loader = dp.get_dataloader(train_data, data_dir, training=True,
                               **loader_params)
    return train_model_w(model, loader, testdata, data_dir, loader_params,
                         epochs=epochs, weights=weight)
def evaluate(model, dataset, params, metric=None):
    """Print top-1 accuracy of `model` on the dataset's validation split.

    Args:
        model: network mapping image batches to class scores.
        dataset: object exposing .val and .images_dir.
        params: keyword arguments forwarded to the dataloader.
        metric: unused; kept for signature compatibility with the other
            `evaluate` variants in this module.

    Returns None; the accuracy (correct / total) is printed to stdout.

    Fix: `pids.cuda(async=True)` is a SyntaxError on Python >= 3.7
    (`async` became a keyword); `non_blocking=True` is the PyTorch
    replacement.  Large blocks of dead commented-out code were removed.
    """
    val = dataset.val
    dataloader = dp.get_dataloader(val, dataset.images_dir, **params)
    correct = 0
    total = 0
    for imgs, _, pids, _ in dataloader:
        inputs = [Variable(imgs)]
        # non_blocking only overlaps the copy when the source is pinned.
        targets = pids.cuda(non_blocking=True)
        outputs = model(*inputs)
        total += targets.data.size(0)
        correct += accuracy(outputs.data, targets)
    print('%f\n' % (correct / total))
def get_feature(model, data, data_dir, params):
    """Extract and return features for `data` (params-style loader API)."""
    loader = dp.get_dataloader(data, data_dir, **params)
    feats, _ = extract_features(model, loader)
    return feats