def train(cfg):
    """Full supervised training entry point driven by a frozen config node.

    Creates a fresh output directory (refusing to overwrite an existing one),
    dumps the config, builds loaders/model/solver from the cfg factories and
    hands everything to ``Trainer.run``.

    Parameters
    ----------
    cfg : config node
        Must provide OUTPUT_DIR, DEVICE, DEVICE_ID and the SOLVER.* fields
        read below.

    Raises
    ------
    FileExistsError
        If ``cfg.OUTPUT_DIR`` already exists (protects previous runs).
    """
    # output: refuse to clobber a previous run's artifacts.
    # FIX: was `raise KeyError(...)` — KeyError signals a missing mapping key,
    # not a filesystem collision; FileExistsError is the accurate exception.
    output_dir = cfg.OUTPUT_DIR
    if os.path.exists(output_dir):
        raise FileExistsError("Existing path: {}".format(output_dir))
    os.makedirs(output_dir)
    # Persist the effective config next to the logs for reproducibility.
    with open(os.path.join(output_dir, 'config.yaml'), 'w') as f_out:
        print(cfg, file=f_out)

    # logger
    logger = make_logger("project", output_dir, 'log')

    # device: restrict visible GPUs before any CUDA context is created.
    num_gpus = 0
    if cfg.DEVICE == 'cuda':
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.DEVICE_ID
        num_gpus = len(cfg.DEVICE_ID.split(','))
        logger.info("Using {} GPUs.\n".format(num_gpus))
    cudnn.benchmark = True  # fixed input sizes assumed — TODO confirm
    device = torch.device(cfg.DEVICE)

    # data
    train_loader, query_loader, gallery_loader, num_classes = make_loader(cfg)

    # model (wrapped for multi-GPU only when more than one device is visible)
    model = make_model(cfg, num_classes=num_classes)
    if num_gpus > 1:
        model = nn.DataParallel(model)

    # solver
    criterion = make_loss(cfg, num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)

    # do_train
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      criterion=criterion,
                      logger=logger,
                      scheduler=scheduler,
                      device=device)
    trainer.run(start_epoch=0,
                total_epoch=cfg.SOLVER.MAX_EPOCHS,
                train_loader=train_loader,
                query_loader=query_loader,
                gallery_loader=gallery_loader,
                print_freq=cfg.SOLVER.PRINT_FREQ,
                eval_period=cfg.SOLVER.EVAL_PERIOD,
                out_dir=output_dir)
    print('Done.')
def train(config_file, **kwargs):
    """Train a re-ID baseline from a YAML config, optionally with center loss.

    Merges ``config_file`` plus ``kwargs`` overrides into the module-level
    ``cfg``, builds model/loss/optimizer, runs the epoch loop and evaluates
    (CMC/mAP) every ``cfg.SOLVER.EVAL_PERIOD`` epochs.

    Parameters
    ----------
    config_file : str
        Path to the YAML config merged into the global ``cfg``.
    **kwargs
        Flat key/value pairs forwarded to ``cfg.merge_from_list``.
    """
    cfg.merge_from_file(config_file)
    if kwargs:
        # merge_from_list expects an alternating [key, value, ...] list.
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    #PersonReID_Dataset_Downloader('./datasets',cfg.DATASETS.NAMES)
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log')
    logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS
    # Sampler name doubles as the loss-mode switch: any sampler containing
    # 'center' activates the center-loss branch below.
    method = cfg.DATALOADER.SAMPLER

    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    model = getattr(models, cfg.MODEL.NAME)(num_classes, cfg.MODEL.LAST_STRIDE)
    if 'center' in method:
        # Center loss keeps learnable class centers with their own optimizer.
        loss_fn, center_criterion = make_loss(cfg)
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion)
    else:
        loss_fn = make_loss(cfg)
        optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)

    logger.info("Start training")
    since = time.time()
    for epoch in range(epochs):
        count = 0
        running_loss = 0.0
        running_acc = 0
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels = data
            if device:
                model.to(device)
                images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            if 'center' in method:
                optimizer_center.zero_grad()

            # Model returns (classification scores, embedding features).
            scores, feats = model(images)
            loss = loss_fn(scores, feats, labels)
            loss.backward()
            optimizer.step()
            if 'center' in method:
                # Undo the center-loss weight applied inside the total loss so
                # the centers are updated with the *unweighted* gradient; must
                # run after backward() and before optimizer_center.step().
                for param in center_criterion.parameters():
                    param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT)
                optimizer_center.step()

            count = count + 1
            running_loss += loss.item()
            running_acc += (scores.max(1)[1] == labels).float().mean().item()

        # NOTE(review): scheduler.get_lr() is deprecated in newer PyTorch in
        # favor of get_last_lr() — confirm against the installed version.
        logger.info(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
            .format(epoch + 1, count, len(train_loader), running_loss / count,
                    running_acc / count,
                    scheduler.get_lr()[0]))
        scheduler.step()

        if (epoch + 1) % checkpoint_period == 0:
            # Checkpointing moves the model to CPU; it is moved back to
            # `device` at the top of the next batch loop.
            model.cpu()
            model.save(output_dir, epoch + 1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader, desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data
                    if device:
                        model.to(device)
                        images = images.to(device)
                    feats = model(images)
                all_feats.append(feats)
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))
            logger.info("start evaluation")
            # First `num_query` entries of the loader are queries, the rest
            # form the gallery — presumably enforced by data_loader; verify.
            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
def train(config_file, **kwargs):
    """One-shot re-ID training with progressive sample propagation (PSP).

    Periodically calls ``PSP`` to select the ``top`` nearest unlabeled
    reference samples, *re-initializes* the model/optimizer/scheduler from
    scratch, and trains on GCN-selected relevant images. Evaluates CMC/mAP
    every ``cfg.SOLVER.EVAL_PERIOD`` epochs.

    Parameters
    ----------
    config_file : str
        YAML config merged into the global ``cfg``.
    **kwargs
        Flat key/value overrides for ``cfg.merge_from_list``.
    """
    # 1. config
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    logger = make_logger("Reid_Baseline", output_dir, 'log')
    logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    # 2. datasets
    # Load the original dataset (full reference pool, iterated in a fixed
    # order — shuffle=False — so PSP indices stay aligned with it).
    dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES +
                                     '_origin')  #'Market1501_origin'
    train_set_reference = ImageDataset(dataset_reference.train,
                                       train_transforms)
    train_loader_reference = DataLoader(train_set_reference,
                                        batch_size=128,
                                        shuffle=False,
                                        num_workers=cfg.DATALOADER.NUM_WORKERS,
                                        collate_fn=train_collate_fn)
    # Load the one-shot dataset
    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    # 3. load the model and optimizer
    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    loss_fn = make_loss(cfg)
    logger.info("Start training")
    since = time.time()

    top = 0  # the choose of the nearest sample
    top_update = 0  # the first iteration train 80 steps and the following train 40

    # 4. Train and test
    for epoch in range(epochs):
        running_loss = 0.0
        running_acc = 0
        # NOTE(review): count starts at 1 (not 0) here, unlike the sibling
        # trainers; it is only incremented below and never logged — confirm
        # whether the off-by-one is intentional.
        count = 1

        # get nearest samples and reset the model: first PSP round trains for
        # 80 epochs, subsequent rounds for 40.
        if top_update < 80:
            train_step = 80
        else:
            train_step = 40
        if top_update % train_step == 0:
            print("top: ", top)
            # A: affinity/adjacency over the reference set; path_labeled maps
            # labeled image paths — both produced by PSP from current model.
            A, path_labeled = PSP(model, train_loader_reference, train_loader,
                                  top, cfg)
            top += cfg.DATALOADER.NUM_JUMP
            # Re-initialize model and solver from scratch for the new round.
            model = getattr(models, cfg.MODEL.NAME)(num_classes)
            optimizer = make_optimizer(cfg, model)
            scheduler = make_scheduler(cfg, optimizer)
            A_store = A.clone()
        top_update += 1

        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels_batch, img_path = data
            # Map batch image paths back to reference-set indices.
            index, index_labeled = find_index_by_path(img_path,
                                                      dataset_reference.train,
                                                      path_labeled)
            # Expand the batch with graph-relevant reference images;
            # choose_from_nodes marks the positions of the original batch.
            images_relevant, GCN_index, choose_from_nodes, labels = load_relevant(
                cfg, dataset_reference.train, index, A_store, labels_batch,
                index_labeled)
            # if device:
            model.to(device)
            images = images_relevant.to(device)
            scores, feat = model(images)
            del images
            loss = loss_fn(scores, feat, labels.to(device), choose_from_nodes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            count = count + 1
            running_loss += loss.item()
            # Accuracy measured only on the original batch positions.
            running_acc += (scores[choose_from_nodes].max(1)[1].cpu() ==
                            labels_batch).float().mean().item()
        scheduler.step()

        # for model save if you need
        # if (epoch+1) % checkpoint_period == 0:
        #     model.cpu()
        #     model.save(output_dir,epoch+1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader, desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data
                    model.to(device)
                    images = images.to(device)
                    feats = model(images)
                    del images
                # Keep features on CPU to bound GPU memory during eval.
                all_feats.append(feats.cpu())
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))
            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10, 20]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
def main(config, args):
    """Build a scheduler from *config*/*args* and run its problem pipeline."""
    make_scheduler(config, args).process_problem()
def train(config_file, resume=False, iteration=10, **kwargs):
    """Cross-domain re-ID training on SOURCE datasets with TARGET validation.

    Supports resuming from the latest checkpoints in ``cfg.OUTPUT_DIR``
    (model, optimizer, scheduler and epoch counter). Validation repeats
    ``iteration`` times over re-seeded random target splits and reports
    mean/std of mAP and CMC.

    Parameters
    ----------
    config_file : str
        YAML config merged into the global ``cfg``.
    resume : bool
        If true, reload the last saved model/optimizer/scheduler and append
        to the existing log instead of starting fresh.
    iteration : int
        Number of re-seeded evaluation repetitions per target dataset.
        (FIX: previously this name was used in the validation loop without
        ever being defined, raising NameError at the first evaluation.)
    **kwargs
        Flat key/value overrides for ``cfg.merge_from_list``.
    """
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    # [PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR,dataset) for dataset in cfg.DATASETS.SOURCE]
    # [PersonReID_Dataset_Downloader(cfg.DATASETS.STORE_DIR,dataset) for dataset in cfg.DATASETS.TARGET]

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log', resume)
    if not resume:
        logger.info("Using {} GPUS".format(1))
        logger.info("Loaded configuration file {}".format(config_file))
        logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    train_loader, _, _, num_classes = data_loader(cfg,
                                                  cfg.DATASETS.SOURCE,
                                                  merge=cfg.DATASETS.MERGE)

    model = getattr(models, cfg.MODEL.NAME)(num_classes,
                                            cfg.MODEL.LAST_STRIDE,
                                            cfg.MODEL.POOL)
    if resume:
        checkpoints = get_last_stats(output_dir)
        # Checkpoint key may be either the configured model name or the
        # class's repr, depending on how it was saved.
        try:
            model_dict = torch.load(checkpoints[cfg.MODEL.NAME])
        except KeyError:
            model_dict = torch.load(checkpoints[str(type(model))])
        model.load_state_dict(model_dict)
    if device:
        model.to(device)  # must be done before the optimizer generation

    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    base_epo = 0
    if resume:
        optimizer.load_state_dict(torch.load(checkpoints['opt']))
        sch_dict = torch.load(checkpoints['sch'])
        scheduler.load_state_dict(sch_dict)
        base_epo = checkpoints['epo']

    loss_fn = make_loss(cfg)

    if not resume:
        logger.info("Start training")
    since = time.time()
    for epoch in range(epochs):
        count = 0
        running_loss = 0.0
        running_acc = 0
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels, domains = data
            if device:
                model.to(device)
                images, labels, domains = images.to(device), labels.to(
                    device), domains.to(device)

            optimizer.zero_grad()
            scores, feats = model(images)
            loss = loss_fn(scores, feats, labels)
            loss.backward()
            optimizer.step()

            count = count + 1
            running_loss += loss.item()
            # scores is indexable; accuracy is computed on scores[0] —
            # presumably the primary classification head. TODO confirm.
            running_acc += (
                scores[0].max(1)[1] == labels).float().mean().item()

        logger.info(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, Base Lr: {:.2e}"
            .format(epoch + 1 + base_epo, count, len(train_loader),
                    running_loss / count, running_acc / count,
                    scheduler.get_lr()[0]))
        scheduler.step()

        if (epoch + 1 + base_epo) % checkpoint_period == 0:
            model.cpu()
            model.save(output_dir, epoch + 1 + base_epo)
            torch.save(
                optimizer.state_dict(),
                os.path.join(output_dir,
                             'opt_epo' + str(epoch + 1 + base_epo) + '.pth'))
            torch.save(
                scheduler.state_dict(),
                os.path.join(output_dir,
                             'sch_epo' + str(epoch + 1 + base_epo) + '.pth'))

        # Validation
        if (epoch + base_epo + 1) % eval_period == 0:
            # Validation on Target Dataset
            for target in cfg.DATASETS.TARGET:
                mAPs = []
                cmcs = []
                for i in range(iteration):
                    # Fresh seed per repetition so the random val split varies.
                    set_seeds(i)
                    _, val_loader, num_query, _ = data_loader(cfg, (target, ),
                                                              merge=False)
                    all_feats = []
                    all_pids = []
                    all_camids = []
                    # NOTE(review): this rebinds the outer `since`, so the
                    # final "Training complete" time only measures the tail
                    # after the last validation — kept as-is to preserve
                    # existing behavior.
                    since = time.time()
                    for data in tqdm(val_loader,
                                     desc='Feature Extraction',
                                     leave=False):
                        model.eval()
                        with torch.no_grad():
                            images, pids, camids = data
                            if device:
                                model.to(device)
                                images = images.to(device)
                            feats = model(images)
                            # L2-normalize features for cosine-style matching.
                            feats /= feats.norm(dim=-1, keepdim=True)
                        all_feats.append(feats)
                        all_pids.extend(np.asarray(pids))
                        all_camids.extend(np.asarray(camids))
                    cmc, mAP = evaluation(all_feats, all_pids, all_camids,
                                          num_query)
                    mAPs.append(mAP)
                    cmcs.append(cmc)
                mAP = np.mean(np.array(mAPs))
                cmc = np.mean(np.array(cmcs), axis=0)
                mAP_std = np.std(np.array(mAPs))
                cmc_std = np.std(np.array(cmcs), axis=0)
                logger.info("Validation Results: {} - Epoch: {}".format(
                    target, epoch + 1 + base_epo))
                logger.info("mAP: {:.1%} (std: {:.3%})".format(mAP, mAP_std))
                for r in [1, 5, 10]:
                    logger.info(
                        "CMC curve, Rank-{:<3}:{:.1%} (std: {:.3%})".format(
                            r, cmc[r - 1], cmc_std[r - 1]))
            # Restore global state after the seeded evaluation rounds.
            reset()

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
def train(config_file, resume=False, iteration=10, STEP=4, **kwargs):
    """Adversarial domain-generalization training (generator + discriminator).

    Trains a feature generator G against a domain discriminator D on the
    SOURCE datasets, with annealed adversarial weighting, periodic TARGET
    validation, and best-checkpoint tracking by Rank-1.

    Parameter
    ---------
    resume : bool
        If true, continue the training and append logs to the previous log.
    iteration : int
        number of loops to test Random Datasets.
    STEP : int
        Number of steps to train the discriminator per batch.
        (FIX: the body previously re-assigned ``STEP = 4`` unconditionally,
        silently ignoring this parameter; the reassignment is removed.)
    """
    cfg.merge_from_file(config_file)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    # [PersonReID_Dataset_Downloader('./datasets', name) for name in cfg.DATASETS.NAMES]
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = make_logger("Reid_Baseline", output_dir, 'log', resume)
    if not resume:
        logger.info("Using {} GPUS".format(1))
        logger.info("Loaded configuration file {}".format(config_file))
        logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    output_dir = cfg.OUTPUT_DIR
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS
    sources = cfg.DATASETS.SOURCE
    target = cfg.DATASETS.TARGET
    pooling = cfg.MODEL.POOL
    last_stride = cfg.MODEL.LAST_STRIDE

    # tf_board_path = os.path.join(output_dir, 'tf_runs')
    # if os.path.exists(tf_board_path):
    #     shutil.rmtree(tf_board_path)
    # writer = SummaryWriter(tf_board_path)

    # Loss-weighting hyperparameters for D, G and the classification head.
    gan_d_param = cfg.MODEL.D_PARAM
    gan_g_param = cfg.MODEL.G_PARAM
    class_param = cfg.MODEL.CLASS_PARAM
    """Set up"""
    train_loader, _, _, num_classes = data_loader(cfg,
                                                  cfg.DATASETS.SOURCE,
                                                  merge=cfg.DATASETS.MERGE)
    num_classes_train = [
        data_loader(cfg, [source], merge=False)[3]
        for source in cfg.DATASETS.SOURCE
    ]

    # based on input datasets: weight each source domain inversely to its
    # class count so small datasets are not drowned out.
    bias = (max(num_classes_train)) / np.array(num_classes_train)
    bias = bias / bias.sum() * 5
    discriminator_loss = LabelSmoothingLoss(len(sources),
                                            weights=bias,
                                            smoothing=0.1)
    minus_generator_loss = LabelSmoothingLoss(len(sources),
                                              weights=bias,
                                              smoothing=0.)
    classification_loss = LabelSmoothingLoss(num_classes, smoothing=0.1)
    from loss.triplet_loss import TripletLoss
    triplet = TripletLoss(cfg.SOLVER.MARGIN)
    triplet_loss = lambda feat, labels: triplet(feat, labels)[0]

    module = getattr(generalizers, cfg.MODEL.NAME)
    D = getattr(module, 'Generalizer_D')(len(sources))
    G = getattr(module, 'Generalizer_G')(num_classes, last_stride, pooling)
    if resume:
        checkpoints = get_last_stats(output_dir)
        D.load_state_dict(torch.load(checkpoints[str(type(D))]))
        G.load_state_dict(torch.load(checkpoints[str(type(G))]))
    if device:
        # must be done before the optimizer generation
        D.to(device)
        G.to(device)

    discriminator_optimizer = Adam(D.parameters(),
                                   lr=cfg.SOLVER.BASE_LR,
                                   weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    generator_optimizer = Adam(G.parameters(),
                               lr=cfg.SOLVER.BASE_LR,
                               weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    discriminator_scheduler = make_scheduler(cfg, discriminator_optimizer)
    generator_scheduler = make_scheduler(cfg, generator_optimizer)
    base_epo = 0
    if resume:
        discriminator_optimizer.load_state_dict(
            torch.load(checkpoints['D_opt']))
        generator_optimizer.load_state_dict(torch.load(checkpoints['G_opt']))
        discriminator_scheduler.load_state_dict(
            torch.load(checkpoints['D_sch']))
        generator_scheduler.load_state_dict(torch.load(checkpoints['G_sch']))
        base_epo = checkpoints['epo']

    # Modify the labels:
    # RULE:
    # according to the order of names in cfg.DATASETS.NAMES, add base numebr
    since = time.time()
    if not resume:
        logger.info("Start training")
    batch_count = 0
    # FIX: removed `STEP = 4` here — it overrode the STEP parameter.
    # NOTE(review): Best_R1s assumes exactly four TARGET datasets; Benchmark
    # holds the reference Rank-1 scores these runs are compared against.
    Best_R1s = [0, 0, 0, 0]
    Benchmark = [69.6, 43.7, 59.4, 78.2]
    for epoch in range(epochs):
        # anneal = sigmoid(annealing_base + annealing_factor*(epoch+base_epo))
        # Adversarial weight decays linearly to 0 over the first 80 epochs.
        anneal = max(1 - (1 / 80 * epoch), 0)
        count = 0
        running_g_loss = 0.
        running_source_loss = 0.
        running_class_acc = 0.
        running_acc_source = 0.
        running_class_loss = 0.
        reset()
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            # NOTE: zip ensured the shortest dataset dominates the iteration
            D.train()
            G.train()
            images, labels, domains = data
            if device:
                D.to(device)
                G.to(device)
                images, labels, domains = images.to(device), labels.to(
                    device), domains.to(device)
            """Start Training D"""
            feature_vec, scores, gan_vec = G(images)
            # Freeze G while D takes STEP optimization steps on this batch.
            for param in G.parameters():
                param.requires_grad = False
            for param in D.parameters():
                param.requires_grad = True
            for _ in range(STEP):
                discriminator_optimizer.zero_grad()
                # Detach G's outputs so D's updates do not backprop into G.
                pred_domain = D(
                    [v.detach() for v in gan_vec] if isinstance(
                        gan_vec, list) else gan_vec.detach())
                # NOTE: Feat output! Not Probability!
                d_losses, accs = discriminator_loss(pred_domain,
                                                    domains,
                                                    compute_acc=True)
                d_source_loss = d_losses.mean()
                d_source_acc = accs.float().mean().item()
                d_loss = d_source_loss
                w_d_loss = anneal * d_loss * gan_d_param
                w_d_loss.backward()
                discriminator_optimizer.step()
            """Start Training G"""
            # Now freeze D and update G once: adversarial + classification +
            # triplet objectives.
            for param in D.parameters():
                param.requires_grad = False
            for param in G.parameters():
                param.requires_grad = True
            generator_optimizer.zero_grad()
            g_loss = -1. * minus_generator_loss(D(gan_vec), domains).mean()
            class_loss = classification_loss(scores, labels).mean()
            tri_loss = triplet_loss(feature_vec, labels)
            class_loss = class_loss * cfg.SOLVER.LAMBDA1 + tri_loss * cfg.SOLVER.LAMBDA2
            w_regularized_g_loss = anneal * gan_g_param * g_loss + class_param * class_loss
            w_regularized_g_loss.backward()
            generator_optimizer.step()
            """Stop training"""
            running_g_loss += g_loss.item()
            running_source_loss += d_source_loss.item()
            running_acc_source += d_source_acc
            # TODO: assume all batches are the same size
            running_class_loss += class_loss.item()
            class_acc = (scores.max(1)[1] == labels).float().mean().item()
            running_class_acc += class_acc
            # writer.add_scalar('D_loss', d_source_loss.item(), batch_count)
            # writer.add_scalar('D_acc', d_source_acc, batch_count)
            # writer.add_scalar('G_loss', g_loss.item(), batch_count)
            # writer.add_scalar('Class_loss', class_loss.item(), batch_count)
            # writer.add_scalar('Class_acc', class_acc, batch_count)
            torch.cuda.empty_cache()
            count = count + 1
            batch_count += 1
            # if count == 10:break
        logger.info(
            "Epoch[{}] Iteration[{}] Loss: [G] {:.3f} [D] {:.3f} [Class] {:.3f}, Acc: [Class] {:.3f} [D] {:.3f}, Base Lr: {:.2e}"
            .format(epoch + base_epo + 1, count, running_g_loss / count,
                    running_source_loss / count, running_class_loss / count,
                    running_class_acc / count, running_acc_source / count,
                    generator_scheduler.get_lr()[0]))
        generator_scheduler.step()
        discriminator_scheduler.step()

        if (epoch + base_epo + 1) % checkpoint_period == 0:
            G.cpu()
            G.save(output_dir, epoch + base_epo + 1)
            D.cpu()
            D.save(output_dir, epoch + base_epo + 1)
            torch.save(
                generator_optimizer.state_dict(),
                os.path.join(output_dir,
                             'G_opt_epo' + str(epoch + base_epo + 1) + '.pth'))
            torch.save(
                discriminator_optimizer.state_dict(),
                os.path.join(output_dir,
                             'D_opt_epo' + str(epoch + base_epo + 1) + '.pth'))
            torch.save(
                generator_scheduler.state_dict(),
                os.path.join(output_dir,
                             'G_sch_epo' + str(epoch + base_epo + 1) + '.pth'))
            torch.save(
                discriminator_scheduler.state_dict(),
                os.path.join(output_dir,
                             'D_sch_epo' + str(epoch + base_epo + 1) + '.pth'))

        # Validation
        if (epoch + base_epo + 1) % eval_period == 0:
            # Validation on Target Dataset
            for target in cfg.DATASETS.TARGET:
                mAPs = []
                cmcs = []
                for i in range(iteration):
                    set_seeds(i)
                    _, val_loader, num_query, _ = data_loader(cfg, (target, ),
                                                              merge=False,
                                                              verbose=False)
                    all_feats = []
                    all_pids = []
                    all_camids = []
                    since = time.time()
                    for data in tqdm(val_loader,
                                     desc='Feature Extraction',
                                     leave=False):
                        G.eval()
                        with torch.no_grad():
                            images, pids, camids = data
                            if device:
                                G.to(device)
                                images = images.to(device)
                            feats = G(images)
                            feats /= feats.norm(dim=-1, keepdim=True)
                        all_feats.append(feats)
                        all_pids.extend(np.asarray(pids))
                        all_camids.extend(np.asarray(camids))
                    cmc, mAP = evaluation(all_feats, all_pids, all_camids,
                                          num_query)
                    mAPs.append(mAP)
                    cmcs.append(cmc)
                mAP = np.mean(np.array(mAPs))
                cmc = np.mean(np.array(cmcs), axis=0)
                mAP_std = np.std(np.array(mAPs))
                cmc_std = np.std(np.array(cmcs), axis=0)
                logger.info("Validation Results: {} - Epoch: {}".format(
                    target, epoch + 1 + base_epo))
                logger.info("mAP: {:.1%} (std: {:.3%})".format(mAP, mAP_std))
                for r in [1, 5, 10]:
                    logger.info(
                        "CMC curve, Rank-{:<3}:{:.1%} (std: {:.3%})".format(
                            r, cmc[r - 1], cmc_std[r - 1]))

        # Record Best: after epoch 60, on epochs ≡ 1 or 2 (mod 5), re-evaluate
        # all targets and snapshot everything under epoch id -1 when every
        # target's Rank-1 beats the best seen so far.
        if (epoch + base_epo + 1) > 60 and ((epoch + base_epo + 1) % 5 == 1 or
                                            (epoch + base_epo + 1) % 5 == 2):
            # Validation on Target Dataset
            R1s = []
            for target in cfg.DATASETS.TARGET:
                mAPs = []
                cmcs = []
                for i in range(iteration):
                    set_seeds(i)
                    _, val_loader, num_query, _ = data_loader(cfg, (target, ),
                                                              merge=False,
                                                              verbose=False)
                    all_feats = []
                    all_pids = []
                    all_camids = []
                    since = time.time()
                    for data in tqdm(val_loader,
                                     desc='Feature Extraction',
                                     leave=False):
                        G.eval()
                        with torch.no_grad():
                            images, pids, camids = data
                            if device:
                                G.to(device)
                                images = images.to(device)
                            feats = G(images)
                            feats /= feats.norm(dim=-1, keepdim=True)
                        all_feats.append(feats)
                        all_pids.extend(np.asarray(pids))
                        all_camids.extend(np.asarray(camids))
                    cmc, mAP = evaluation(all_feats, all_pids, all_camids,
                                          num_query)
                    mAPs.append(mAP)
                    cmcs.append(cmc)
                mAP = np.mean(np.array(mAPs))
                cmc = np.mean(np.array(cmcs), axis=0)
                R1 = cmc[0]
                R1s.append(R1)
            if (np.array(R1s) > np.array(Best_R1s)).all():
                logger.info("Best checkpoint at {}: {}".format(
                    str(epoch + base_epo + 1),
                    ', '.join([str(s) for s in R1s])))
                Best_R1s = R1s
                G.cpu()
                G.save(output_dir, -1)
                D.cpu()
                D.save(output_dir, -1)
                torch.save(
                    generator_optimizer.state_dict(),
                    os.path.join(output_dir, 'G_opt_epo' + str(-1) + '.pth'))
                torch.save(
                    discriminator_optimizer.state_dict(),
                    os.path.join(output_dir, 'D_opt_epo' + str(-1) + '.pth'))
                torch.save(
                    generator_scheduler.state_dict(),
                    os.path.join(output_dir, 'G_sch_epo' + str(-1) + '.pth'))
                torch.save(
                    discriminator_scheduler.state_dict(),
                    os.path.join(output_dir, 'D_sch_epo' + str(-1) + '.pth'))
            else:
                logger.info("Rank 1 results: {}".format(', '.join(
                    [str(s) for s in R1s])))

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)
def train(config_file1, config_file2, **kwargs):
    """One-shot PSP training with a mid-run GAN data-augmentation stage.

    Starts like the plain PSP trainer under ``config_file1``; once enough
    nearest samples have been mined (``top >= 8``), retrains a GAN, generates
    new images, switches to ``config_file2`` and continues with the augmented
    reference set (``PSP2``).

    Parameters
    ----------
    config_file1 : str
        Config for the initial training stage.
    config_file2 : str
        Config loaded after the GAN has produced augmented data.
    **kwargs
        Flat key/value overrides for ``cfg.merge_from_list``.
    """
    # 1. config
    cfg.merge_from_file(config_file1)
    if kwargs:
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    #cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    logger = make_logger("Reid_Baseline", output_dir, 'log')
    #logger.info("Using {} GPUS".format(1))
    logger.info("Loaded configuration file {}".format(config_file1))
    logger.info("Running with config:\n{}".format(cfg))

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    eval_period = cfg.SOLVER.EVAL_PERIOD
    # FIX: this line was commented out while `device` is used throughout the
    # loops below (`model.to(device)`, `.to(device)`), which raised NameError.
    device = torch.device(cfg.DEVICE)
    epochs = cfg.SOLVER.MAX_EPOCHS

    # 2. datasets
    # Load the original dataset
    #dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES )
    dataset_reference = init_dataset(cfg, cfg.DATASETS.NAMES +
                                     '_origin')  #'Market1501_origin'
    train_set_reference = ImageDataset(dataset_reference.train,
                                       train_transforms)
    # Not fed through the network directly, so no extra transform is needed.
    train_loader_reference = DataLoader(train_set_reference,
                                        batch_size=128,
                                        shuffle=False,
                                        num_workers=cfg.DATALOADER.NUM_WORKERS,
                                        collate_fn=train_collate_fn)
    # Load the one-shot dataset
    train_loader, val_loader, num_query, num_classes = data_loader(
        cfg, cfg.DATASETS.NAMES)

    # 3. load the model and optimizer
    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)
    loss_fn = make_loss(cfg)
    logger.info("Start training")
    since = time.time()
    if torch.cuda.device_count() > 1:
        print("Use", torch.cuda.device_count(), 'gpus')
    elif torch.cuda.device_count() == 1:
        print("Use", torch.cuda.device_count(), 'gpu')
    model = nn.DataParallel(model)
    top = 0  # the choose of the nearest sample
    top_update = 0  # the first iteration train 80 steps and the following train 40
    train_time = 0  # number of times the GAN has been (re)trained so far
    bound = 1  # max GAN training rounds; revisit if multiple rounds are needed
    lock = False
    train_compen = 0
    # 4. Train and test
    for epoch in range(epochs):
        running_loss = 0.0
        running_acc = 0
        count = 1
        # get nearest samples and reset the model
        if top_update < 80:
            # Whether the first pass over newly GAN-generated images also
            # needs the full 80 steps is an open experimental question.
            train_step = 80
        else:
            train_step = 40
        #if top_update % train_step == 0:
        if top_update % train_step == 0 and train_compen == 0:
            # The original experiments pushed `top` to 41; 8 is a compromise
            # threshold (itself worth an ablation).
            print("top: ", top)
            #if 1==1:
            if top >= 8 and train_time < bound:
                train_compen = (top - 1) * 40 + 80
                #build_image(A,train_loader_reference,train_loader)
                train_time += 1
                # GAN training mode
                mode = 'train'
                retrain(mode)
                # Generate GAN images into the original dataset
                produce()
                # Switch to the second-stage config and rebuild loaders over
                # the augmented data.
                cfg.merge_from_file(config_file2)
                output_dir = cfg.OUTPUT_DIR
                if output_dir and not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                logger = make_logger("Reid_Baseline", output_dir, 'log')
                logger.info(
                    "Loaded configuration file {}".format(config_file2))
                logger.info("Running with config:\n{}".format(cfg))
                dataset_reference = init_dataset(
                    cfg, cfg.DATASETS.NAMES + '_origin')  #'Market1501_origin'
                train_set_reference = ImageDataset(dataset_reference.train,
                                                   train_transforms)
                train_loader_reference = DataLoader(
                    train_set_reference,
                    batch_size=128,
                    shuffle=False,
                    num_workers=cfg.DATALOADER.NUM_WORKERS,
                    collate_fn=train_collate_fn)
                dataset_ref = init_dataset(cfg, cfg.DATASETS.NAMES +
                                           '_ref')  #'Market1501_origin'
                train_set_ref = ImageDataset(dataset_ref.train,
                                             train_transforms)
                train_loader_ref = DataLoader(
                    train_set_ref,
                    batch_size=128,
                    shuffle=False,
                    num_workers=cfg.DATALOADER.NUM_WORKERS,
                    collate_fn=train_collate_fn)
                lock = True
            if lock == True:
                # First selection round after augmentation uses PSP2 with the
                # extra `_ref` loader.
                A, path_labeled = PSP2(model, train_loader_reference,
                                       train_loader, train_loader_ref, top,
                                       logger, cfg)
                lock = False
            else:
                A, path_labeled = PSP(model, train_loader_reference,
                                      train_loader, top, logger, cfg)
            #vis = len(train_loader_reference.dataset)
            #A= torch.ones(vis, len(train_loader_reference.dataset))
            #build_image(A,train_loader_reference,train_loader)
            top += cfg.DATALOADER.NUM_JUMP
            # Re-initialize model and solver from scratch for the new round.
            model = getattr(models, cfg.MODEL.NAME)(num_classes)
            model = nn.DataParallel(model)
            optimizer = make_optimizer(cfg, model)
            scheduler = make_scheduler(cfg, optimizer)
            A_store = A.clone()
        top_update += 1
        for data in tqdm(train_loader, desc='Iteration', leave=False):
            model.train()
            images, labels_batch, img_path = data
            index, index_labeled = find_index_by_path(img_path,
                                                      dataset_reference.train,
                                                      path_labeled)
            images_relevant, GCN_index, choose_from_nodes, labels = load_relevant(
                cfg, dataset_reference.train, index, A_store, labels_batch,
                index_labeled)
            # if device:
            model.to(device)
            images = images_relevant.to(device)
            scores, feat = model(images)
            del images
            loss = loss_fn(scores, feat, labels.to(device), choose_from_nodes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            count = count + 1
            running_loss += loss.item()
            running_acc += (scores[choose_from_nodes].max(1)[1].cpu() ==
                            labels_batch).float().mean().item()
        scheduler.step()

        # for model save if you need
        # if (epoch+1) % checkpoint_period == 0:
        #     model.cpu()
        #     model.save(output_dir,epoch+1)

        # Validation
        if (epoch + 1) % eval_period == 0:
            all_feats = []
            all_pids = []
            all_camids = []
            for data in tqdm(val_loader, desc='Feature Extraction',
                             leave=False):
                model.eval()
                with torch.no_grad():
                    images, pids, camids = data
                    model.to(device)
                    images = images.to(device)
                    feats = model(images)
                    del images
                all_feats.append(feats.cpu())
                all_pids.extend(np.asarray(pids))
                all_camids.extend(np.asarray(camids))
            cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query)
            logger.info("Validation Results - Epoch: {}".format(epoch + 1))
            logger.info("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10, 20]:
                logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(
                    r, cmc[r - 1]))
        # Count down the compensation epochs granted after GAN retraining;
        # selection/reset is suppressed while this is non-zero.
        if train_compen > 0:
            train_compen -= 1

    time_elapsed = time.time() - since
    logger.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logger.info('-' * 10)