def run(self, test):
    """Overrides to provide plugin hooks and defer all output to the
    test result class.
    """
    # From the parent class: let plugins wrap the test and the output stream.
    wrapper = self.config.plugins.prepareTest(test)
    if wrapper is not None:
        test = wrapper
    wrapped = self.config.plugins.setOutputStream(self.stream)
    if wrapped is not None:
        self.stream = wrapped
    result = self._makeResult()
    start = time.time()

    # Custom: boot the runtime environment around the suite.
    runtime = launcher.Runtime(self.config.options.config_file)
    runtime.setup_environment()
    runtime.start_test()
    setattr(test.config, 'runtime', runtime)
    test(result)
    runtime.stop_test()

    # From the parent class: report results.
    stop = time.time()
    result.printErrors()
    result.printSummary(start, stop)
    self.config.plugins.finalize(result)
    return result
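# Hedged analogue of the override pattern above, using only the standard
# library: set up an environment, run the suite, tear down, then report.
# `Runtime` here is a stand-in for launcher.Runtime, which is not shown.
import unittest

class Runtime:
    """Stand-in for launcher.Runtime (assumption, not the real class)."""
    def setup_environment(self): print('environment ready')
    def start_test(self): print('runtime started')
    def stop_test(self): print('runtime stopped')

class RuntimeTestRunner(unittest.TextTestRunner):
    def run(self, test):
        runtime = Runtime()
        runtime.setup_environment()
        runtime.start_test()
        try:
            result = super().run(test)  # output is deferred to the result class
        finally:
            runtime.stop_test()
        return result

class Smoke(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)

if __name__ == '__main__':
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(Smoke)
    RuntimeTestRunner(verbosity=2).run(suite)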
def main(config):
    # Init loaders and base.
    loaders = ReIDLoaders(config)
    base = Base(config)

    # Make directories.
    make_dirs(base.output_path)

    # Init logger.
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode
        # Automatically resume the model from the latest checkpoint.
        if config.auto_resume_training_from_lastest_steps:
            start_train_epoch = base.resume_last_model()
        else:
            start_train_epoch = 0  # was undefined when auto-resume is off
        # Main loop.
        for current_epoch in range(start_train_epoch, config.total_train_epochs):
            # Save model.
            base.save_model(current_epoch)
            # Train.
            _, results = train_an_epoch(config, base, loaders, current_epoch)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))
        # Test.
        base.save_model(config.total_train_epochs)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC, 'none')

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC, pres, recalls, thresholds = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))
        # Pass the precision/recall values themselves, not mAP/CMC again.
        logger('Time: {}; Test Dataset: {}, \nprecision: {} \nrecall: {} \nthresholds: {}'.format(
            time_now(), config.test_dataset, pres, recalls, thresholds))
        plot_prerecall_curve(config, pres, recalls, thresholds, mAP, CMC, 'none')

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
def main(config):
    # Init loaders and base.
    loaders = ReIDLoaders(config)
    base = Base(config)

    # Make directories.
    make_dirs(base.output_path)

    # Init logger.
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    assert config.mode in ['train', 'test', 'visualize']
    if config.mode == 'train':  # train mode
        # Automatically resume the model from the latest checkpoint.
        start_train_epoch = 0
        if config.auto_resume_training_from_lastest_steps:
            print('resume', base.output_path)
            start_train_epoch = base.resume_last_model()
        # Main loop.
        for current_epoch in range(start_train_epoch, config.total_train_epochs + 1):
            # Save model.
            base.save_model(current_epoch)
            # Train.
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))
        # Test.
        base.save_model(config.total_train_epochs)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {}'.format(
            time_now(), config.test_dataset, mAP, CMC))

    elif config.mode == 'test':  # test mode
        base.resume_from_model(config.resume_test_model)
        mAP, CMC = test(config, base, loaders)
        logger('Time: {}; Test Dataset: {}, \nmAP: {} \nRank: {} with len {}'.format(
            time_now(), config.test_dataset, mAP, CMC, len(CMC)))

    elif config.mode == 'visualize':  # visualization mode
        base.resume_from_model(config.resume_visualize_model)
        visualize(config, base, loaders)
def get_key(self, event):
    global serialNumber
    if event.char in '0123456789':
        self.serial += event.char
    elif event.keysym == 'Return' and len(self.serial) > 2:
        serialNumber = self.serial
        result = test(self.serial)
        enregistrer(self.serial, result)  # save the (serial, result) record
        # Map the test result to a log level.
        if result == "pass":
            level = logging.INFO
        elif result == "lackOfTest":
            level = logging.WARNING
        elif result == "abort":
            level = logging.ERROR
        elif result == "unexpected":
            level = logging.ERROR
        else:
            level = logging.CRITICAL
        logger.log(level, self.serial + " " + result)
        self.form.resultat.set(result)
        self.serial = ''
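# Hedged usage sketch: how a handler like get_key() is typically wired into
# Tk. Everything here is illustrative; the widget class, its fields, and the
# scan handling are simplified stand-ins, not the source application.
import tkinter as tk

class ScannerForm:
    def __init__(self, root):
        self.serial = ''
        self.resultat = tk.StringVar(root, value='')
        tk.Label(root, textvariable=self.resultat).pack()
        root.bind('<Key>', self.get_key)  # every key press lands here

    def get_key(self, event):
        if event.char in '0123456789':
            self.serial += event.char
        elif event.keysym == 'Return' and len(self.serial) > 2:
            self.resultat.set('scanned: ' + self.serial)
            self.serial = ''

if __name__ == '__main__':
    root = tk.Tk()
    ScannerForm(root)
    root.mainloop()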
def main(config):
    loader = Loader(config)
    base = Base(config, loader)
    make_dirs(base.output_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_model_path)
    logger = Logger(os.path.join(base.save_logs_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        # Resume from an explicit epoch if requested.
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # Otherwise, automatically resume from the latest saved model.
        if config.auto_resume_training_from_lastest_step:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                indexes = []
                for file in files:
                    indexes.append(int(file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                base.resume_model(indexes[-1])
                start_train_epoch = indexes[-1]
                logger('Time: {}, automatically resume training from the latest step (model {})'
                       .format(time_now(), indexes[-1]))

        for current_epoch in range(start_train_epoch, config.total_train_epoch):
            base.save_model(current_epoch)
            if current_epoch < config.use_graph:
                _, result = train_meta_learning(base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, result))
                # Test every 40 epochs.
                if (current_epoch + 1) % 40 == 0:
                    mAP, CMC = test(config, base, loader)
                    logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                           .format(time_now(), config.target_dataset, mAP, CMC))
            else:
                _, result = train_with_graph(config, base, loader)
                logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, result))
                # Test every 5 epochs.
                if (current_epoch + 1) % 5 == 0:
                    mAP, CMC = test_with_graph(config, base, loader)
                    logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
                           .format(time_now(), config.target_dataset, mAP, CMC))

    elif config.mode == 'test':
        base.resume_model(config.resume_test_model)
        mAP, CMC = test_with_graph(config, base, loader)
        logger('Time: {}; Test on Target Dataset: {}, \nmAP: {} \n Rank: {}'
               .format(time_now(), config.target_dataset, mAP, CMC))
def boucle():  # French: "loop"; console loop for the test station
    while True:
        os.system('color 0B')
        os.system('cls')
        print("=" * 80)
        print("\n\n\t\t\t")
        ascii_banner = pyfiglet.figlet_format("PRET : DOUCHER")  # "READY: SCAN"
        print(ascii_banner)
        print("\n\n\n" + "=" * 80)

        # Wait for a non-empty scan/input.
        inputUser = ""
        while inputUser == "":
            inputUser = str(input())

        resultat = test(inputUser)
        if resultat == "pass":
            os.system('color 2E')
            ascii_banner = pyfiglet.figlet_format("PASS")
            print(ascii_banner)
        elif resultat == "lackOfTest":
            os.system('color 5E')
            ascii_banner = pyfiglet.figlet_format("REPASSER TEST")  # "RETEST"
            print(ascii_banner)
        elif resultat == "abort":
            os.system('color E0')
            ascii_banner = pyfiglet.figlet_format("TEST ABORTED")
            print(ascii_banner)
        elif resultat == "unexpected":
            os.system('color F0')
            ascii_banner = pyfiglet.figlet_format("UNEXPECTED")
            print(ascii_banner)
        else:
            os.system('color CE')
            ascii_banner = pyfiglet.figlet_format("FAIL")
            print(ascii_banner)
            print(resultat)

        enregistrer(inputUser, resultat)  # French: "record/save" the result
        time.sleep(0.45)
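# Hedged stubs so the console loop above can run standalone: `test` and
# `enregistrer` are assumptions about the real station code, which is not
# shown in this snippet.
def test(serial):
    # Placeholder decision; the real code queries the test-bench results.
    return "pass" if serial.endswith("0") else "fail"

def enregistrer(serial, result):
    # Append one semicolon-separated record per scan.
    with open("results.csv", "a") as f:
        f.write("{};{}\n".format(serial, result))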
def test(self):
    """
    ready = request.test()
    if request.test():
        print('Nonblocking Send/Recv Operation completed!')
    """
    # Requests are only valid until the communication operation that they
    # represent completes. A call to the C test() routine with an invalid
    # request ID will crash, so guard on self.valid first.
    if self.valid:
        if core.test(self.id):
            self.valid = False
            return True
        else:
            return False
    else:
        return True
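# Hedged sketch of the guard pattern test() implements: never hand an
# already-completed (invalid) request id back to the C layer. FakeCore is a
# stand-in so the pattern runs without an MPI build; names are hypothetical.
class FakeCore:
    def __init__(self):
        self.polls = 0
    def test(self, request_id):
        self.polls += 1
        return self.polls >= 3  # pretend the op completes on the 3rd poll

core = FakeCore()

class Request:
    def __init__(self, request_id):
        self.id = request_id
        self.valid = True
    def test(self):
        # Guard: once complete, stop calling into the (crashy) C routine.
        if not self.valid:
            return True
        if core.test(self.id):
            self.valid = False
            return True
        return False

req = Request(request_id=7)
while not req.test():
    pass  # overlap other work here
print('completed after', core.polls, 'polls')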
def main_worker(options):
    torch.manual_seed(options['seed'])
    os.environ['CUDA_VISIBLE_DEVICES'] = options['gpu']
    use_gpu = torch.cuda.is_available()
    if options['use_cpu']:
        use_gpu = False

    if use_gpu:
        print("Currently using GPU: {}".format(options['gpu']))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(options['seed'])
    else:
        print("Currently using CPU")

    # Dataset
    print("{} Preparation".format(options['dataset']))
    if 'mnist' in options['dataset']:
        Data = MNIST_OSR(known=options['known'], dataroot=options['dataroot'],
                         batch_size=options['batch_size'], img_size=options['img_size'])
        trainloader, testloader, outloader = Data.train_loader, Data.test_loader, Data.out_loader
    elif 'cifar10' == options['dataset']:
        Data = CIFAR10_OSR(known=options['known'], dataroot=options['dataroot'],
                           batch_size=options['batch_size'], img_size=options['img_size'])
        trainloader, testloader, outloader = Data.train_loader, Data.test_loader, Data.out_loader
    elif 'svhn' in options['dataset']:
        Data = SVHN_OSR(known=options['known'], dataroot=options['dataroot'],
                        batch_size=options['batch_size'], img_size=options['img_size'])
        trainloader, testloader, outloader = Data.train_loader, Data.test_loader, Data.out_loader
    elif 'cifar100' in options['dataset']:
        # Known classes come from CIFAR10; CIFAR100 provides the unknowns.
        Data = CIFAR10_OSR(known=options['known'], dataroot=options['dataroot'],
                           batch_size=options['batch_size'], img_size=options['img_size'])
        trainloader, testloader = Data.train_loader, Data.test_loader
        out_Data = CIFAR100_OSR(known=options['unknown'], dataroot=options['dataroot'],
                                batch_size=options['batch_size'], img_size=options['img_size'])
        outloader = out_Data.test_loader
    else:
        Data = Tiny_ImageNet_OSR(known=options['known'], dataroot=options['dataroot'],
                                 batch_size=options['batch_size'], img_size=options['img_size'])
        trainloader, testloader, outloader = Data.train_loader, Data.test_loader, Data.out_loader
    options['num_classes'] = Data.num_classes

    # Model
    print("Creating model: {}".format(options['model']))
    if options['cs']:
        net = classifier32ABN(num_classes=options['num_classes'])
    else:
        net = classifier32(num_classes=options['num_classes'])
    feat_dim = 128

    if options['cs']:
        print("Creating GAN")
        nz, ns = options['nz'], 1
        if 'tiny_imagenet' in options['dataset']:
            netG = gan.Generator(1, nz, 64, 3)
            netD = gan.Discriminator(1, 3, 64)
        else:
            netG = gan.Generator32(1, nz, 64, 3)
            netD = gan.Discriminator32(1, 3, 64)
        fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
        criterionD = nn.BCELoss()

    # Loss
    options.update({'feat_dim': feat_dim, 'use_gpu': use_gpu})
    Loss = importlib.import_module('loss.' + options['loss'])
    criterion = getattr(Loss, options['loss'])(**options)

    if use_gpu:
        net = nn.DataParallel(net).cuda()
        criterion = criterion.cuda()
        if options['cs']:
            netG = nn.DataParallel(netG, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
            netD = nn.DataParallel(netD, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
            fixed_noise = fixed_noise.cuda()  # .cuda() is not in-place on tensors

    model_path = os.path.join(options['outf'], 'models', options['dataset'])
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    if options['dataset'] == 'cifar100':
        model_path += '_50'
        file_name = '{}_{}_{}_{}_{}'.format(options['model'], options['loss'], 50,
                                            options['item'], options['cs'])
    else:
        file_name = '{}_{}_{}_{}'.format(options['model'], options['loss'],
                                         options['item'], options['cs'])

    if options['eval']:
        net, criterion = load_networks(net, model_path, file_name, criterion=criterion)
        results = test(net, criterion, testloader, outloader, epoch=0, **options)
        print("Acc (%): {:.3f}\t AUROC (%): {:.3f}\t OSCR (%): {:.3f}\t".format(
            results['ACC'], results['AUROC'], results['OSCR']))
        return results

    params_list = [{'params': net.parameters()}, {'params': criterion.parameters()}]
    if options['dataset'] == 'tiny_imagenet':
        optimizer = torch.optim.Adam(params_list, lr=options['lr'])
    else:
        optimizer = torch.optim.SGD(params_list, lr=options['lr'], momentum=0.9, weight_decay=1e-4)
    if options['cs']:
        optimizerD = torch.optim.Adam(netD.parameters(), lr=options['gan_lr'], betas=(0.5, 0.999))
        optimizerG = torch.optim.Adam(netG.parameters(), lr=options['gan_lr'], betas=(0.5, 0.999))

    if options['stepsize'] > 0:
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60, 90, 120])

    start_time = time.time()
    for epoch in range(options['max_epoch']):
        print("==> Epoch {}/{}".format(epoch + 1, options['max_epoch']))
        if options['cs']:
            train_cs(net, netD, netG, criterion, criterionD,
                     optimizer, optimizerD, optimizerG, trainloader, epoch=epoch, **options)
        train(net, criterion, optimizer, trainloader, epoch=epoch, **options)

        if options['eval_freq'] > 0 and (epoch + 1) % options['eval_freq'] == 0 or (epoch + 1) == options['max_epoch']:
            print("==> Test", options['loss'])
            results = test(net, criterion, testloader, outloader, epoch=epoch, **options)
            print("Acc (%): {:.3f}\t AUROC (%): {:.3f}\t OSCR (%): {:.3f}\t".format(
                results['ACC'], results['AUROC'], results['OSCR']))
            save_networks(net, model_path, file_name, criterion=criterion)

        if options['stepsize'] > 0:
            scheduler.step()

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

    return results
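# Hedged usage sketch: a minimal `options` dict that would drive
# main_worker() above. Every key and value here is an assumption inferred
# from the function body, not taken from the project's actual argparse setup.
options = {
    'seed': 0, 'gpu': '0', 'use_cpu': False,
    'dataset': 'mnist', 'known': [0, 1, 2, 3, 4, 5],
    'dataroot': './data', 'batch_size': 128, 'img_size': 32,
    'model': 'classifier32', 'loss': 'ARPLoss', 'cs': False,
    'nz': 100, 'outf': './log', 'item': 0, 'eval': False,
    'lr': 0.1, 'gan_lr': 0.0002, 'stepsize': 30,
    'max_epoch': 100, 'eval_freq': 10,
}
results = main_worker(options)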
def main(config):
    # Loaders and base.
    loaders = Loaders(config)
    base = Base(config, loaders)

    # Make dirs.
    make_dirs(config.save_images_path)
    make_dirs(config.save_models_path)
    make_dirs(config.save_features_path)

    # Logger.
    logger = Logger(os.path.join(config.output_path, 'log.txt'))
    logger(config)

    if config.mode == 'train':
        # Automatically resume the model from the latest checkpoint.
        start_train_epoch = 0
        root, _, files = os_walk(config.save_models_path)
        if len(files) > 0:
            # Get indexes of saved models.
            indexes = []
            for file in files:
                indexes.append(int(file.replace('.pkl', '').split('_')[-1]))
            # Remove incomplete saves (bad cases) and keep available indexes.
            model_num = len(base.model_list)
            available_indexes = copy.deepcopy(indexes)
            for element in indexes:
                if indexes.count(element) < model_num:
                    available_indexes.remove(element)
            available_indexes = sorted(list(set(available_indexes)), reverse=True)
            unavailable_indexes = list(set(indexes).difference(set(available_indexes)))

            if len(available_indexes) > 0:  # resume from the latest model
                base.resume_model(available_indexes[0])
                start_train_epoch = available_indexes[0] + 1
                logger('Time: {}, automatically resume training from the latest step (model {})'
                       .format(time_now(), available_indexes[0]))
            else:
                logger('Time: {}, there are no available models'.format(time_now()))

        # Main loop.
        for current_epoch in range(start_train_epoch,
                                   config.warmup_reid_epoches + config.warmup_gan_epoches + config.train_epoches):
            # Test.
            if current_epoch % 10 == 0 and current_epoch > config.warmup_reid_epoches + config.warmup_gan_epoches:
                results = test(config, base, loaders, brief=True)
                for key in results.keys():
                    logger('Time: {}\n Setting: {}\n {}'.format(time_now(), key, results[key]))

            # Visualize generated images.
            if current_epoch % 10 == 0 or current_epoch <= 10:
                visualize(config, loaders, base, current_epoch)

            # Train.
            if current_epoch < config.warmup_reid_epoches:
                # Warm up the reid model.
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         train_pixel=False, optimize_sl_enc=True)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:
                # Warm up the GAN model.
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False,
                                         train_pixel=False, optimize_sl_enc=False)
            else:
                # Joint training.
                results = train_an_epoch(config, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True,
                                         train_pixel=True, optimize_sl_enc=True)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))

            # Save model.
            base.save_model(current_epoch)

        # Final test.
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(time_now(), key, results[key]))

    elif config.mode == 'test':
        # Resume from a pre-trained model and test.
        base.resume_model_from_path(config.pretrained_model_path, config.pretrained_model_epoch)
        results = test(config, base, loaders, brief=False)
        for key in results.keys():
            logger('Time: {}\n Setting: {}\n {}'.format(time_now(), key, results[key]))
tgt_data_loader_test = get_data_loader(params.tgt_dataset, train=False)

# Init models.
classifier = init_model(net=Classifier(), restore=params.c_model_restore)
generator = init_model(net=Generator(), restore=params.g_model_restore)
critic = init_model(net=Discriminator(input_dims=params.d_input_dims,
                                      hidden_dims=params.d_hidden_dims,
                                      output_dims=params.d_output_dims),
                    restore=params.d_model_restore)

# Train models.
print("=== Training models ===")
print(">>> Classifier <<<")
print(classifier)
print(">>> Generator <<<")
print(generator)
print(">>> Critic <<<")
print(critic)

if not (params.eval_only and classifier.restored and generator.restored and critic.restored):
    classifier, generator = train(classifier, generator, critic,
                                  src_data_loader, tgt_data_loader)

# Evaluate models.
print("=== Evaluating models ===")
print(">>> on source domain <<<")
test(classifier, generator, src_data_loader, params.src_dataset)
print(">>> on target domain <<<")
test(classifier, generator, tgt_data_loader, params.tgt_dataset)
def main(config):
    # Loaders and base.
    loaders = Loaders(config)
    base = Base(config, loaders)

    # Make dirs.
    make_dirs(config.save_images_path)
    make_dirs(config.save_wp_models_path)
    make_dirs(config.save_st_models_path)
    make_dirs(config.save_features_path)

    logger = setup_logger('adaptation_reid', config.output_path, if_train=True)

    if config.mode == 'train':
        if config.resume:
            # Automatically resume the model from the latest checkpoint.
            if config.resume_epoch_num == 0:
                start_train_epoch = 0
                root, _, files = os_walk(config.save_models_path)
                if len(files) > 0:
                    # Get indexes of saved models.
                    indexes = []
                    for file in files:
                        indexes.append(int(file.replace('.pkl', '').split('_')[-1]))
                    # Remove incomplete saves (bad cases) and keep available indexes.
                    model_num = len(base.model_list)
                    available_indexes = copy.deepcopy(indexes)
                    for element in indexes:
                        if indexes.count(element) < model_num:
                            available_indexes.remove(element)
                    available_indexes = sorted(list(set(available_indexes)), reverse=True)
                    unavailable_indexes = list(set(indexes).difference(set(available_indexes)))

                    if len(available_indexes) > 0:  # resume from the latest model
                        base.resume_model(available_indexes[0])
                        start_train_epoch = available_indexes[0] + 1
                        logger.info('Time: {}, automatically resume training from the latest step (model {})'
                                    .format(time_now(), available_indexes[0]))
                    else:
                        logger.info('Time: {}, there are no available models'.format(time_now()))
            else:
                start_train_epoch = config.resume_epoch_num
        else:
            start_train_epoch = 0

        # Main loop.
        for current_epoch in range(start_train_epoch,
                                   config.warmup_reid_epoches + config.warmup_gan_epoches
                                   + config.warmup_adaptation_epoches):
            # Train.
            if current_epoch < config.warmup_reid_epoches:
                # Warm up the reid model.
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=True, self_training=False,
                                         optimize_sl_enc=True, train_adaptation=False)
            elif current_epoch < config.warmup_reid_epoches + config.warmup_gan_epoches:
                # Warm up the GAN model.
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False, self_training=False,
                                         optimize_sl_enc=False, train_adaptation=False)
            elif current_epoch < (config.warmup_reid_epoches + config.warmup_gan_epoches
                                  + config.warmup_adaptation_epoches):
                # Warm up adaptation.
                results = train_an_epoch(config, 0, loaders, base, current_epoch,
                                         train_gan=True, train_reid=False, self_training=False,
                                         optimize_sl_enc=False, train_adaptation=True)
            print("another epoch")
            logger.info('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))

            # Save model.
            if current_epoch % config.save_model_interval == 0:
                base.save_model(current_epoch, True)
            if current_epoch % config.test_model_interval == 0:
                visualize(config, loaders, base, current_epoch)
                test(config, base, loaders, epoch=0, brief=False)

        # Self-training iterations.
        total_wp_epoches = config.warmup_reid_epoches + config.warmup_gan_epoches
        for iter_n in range(config.iteration_number):
            src_dataset, src_dataloader, trg_dataset, trg_dataloader = loaders.get_self_train_loaders()
            trg_labeled_dataloader = generate_labeled_dataset(base, iter_n, src_dataset,
                                                              src_dataloader, trg_dataset,
                                                              trg_dataloader)
            for epoch in range(total_wp_epoches + 1, config.self_train_epoch):
                results = train_an_epoch(config, iter_n, loaders, base, epoch,
                                         train_gan=True, train_reid=False, self_training=True,
                                         optimize_sl_enc=True,
                                         trg_labeled_loader=trg_labeled_dataloader)
                # Log the current self-train epoch, not the stale warmup counter.
                logger.info('Time: {}; Epoch: {}; {}'.format(time_now(), epoch, results))
                if epoch % config.save_model_interval == 0:
                    base.save_model(iter_n * config.self_train_epoch + epoch, False)

    elif config.mode == 'test':
        # Resume from a pre-trained model and test.
        base.resume_model_from_path(config.pretrained_model_path, config.pretrained_model_epoch)
        cmc, mAP = test(config, base, loaders, epoch=100, brief=False)  # avoid shadowing builtin map
import core

def clsr():
    core.os.system("cls")

core.detect_face()

# Menu loop: option 3 doubled as both "unlock" and "exit" in the original;
# exit is now its own option 4.
inp = -1
while inp != 4:
    clsr()
    print("Recognizer App")
    print("=================")
    print("1. Face Recognition")
    print("2. Live Recognition lock")
    print("3. Live Recognition unlock")
    print("4. Exit")
    print(">> ", end="")
    inp = int(input())
    if inp == 1:
        clsr()
        core.test()
    elif inp == 2:
        clsr()
        core.live('lock')
    elif inp == 3:
        clsr()
        core.live('unlock')
def main():
    torch.manual_seed(options['seed'])
    os.environ['CUDA_VISIBLE_DEVICES'] = options['gpu']
    use_gpu = torch.cuda.is_available()
    if options['use_cpu']:
        use_gpu = False

    feat_dim = 2 if 'cnn' in options['model'] else 512
    options.update({'feat_dim': feat_dim, 'use_gpu': use_gpu})

    if use_gpu:
        print("Currently using GPU: {}".format(options['gpu']))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(options['seed'])
    else:
        print("Currently using CPU")

    # Datasets: in-distribution train/test plus an out-of-distribution test set.
    dataset = datasets.create(options['dataset'], **options)
    out_dataset = datasets.create(options['out_dataset'], **options)
    trainloader, testloader = dataset.trainloader, dataset.testloader
    outloader = out_dataset.testloader
    options.update({'num_classes': dataset.num_classes})

    # Model
    print("Creating model: {}".format(options['model']))
    if 'cnn' in options['model']:
        net = ConvNet(num_classes=dataset.num_classes)
    else:
        if options['cs']:
            net = resnet34ABN(num_classes=dataset.num_classes, num_bns=2)
        else:
            net = ResNet34(dataset.num_classes)

    if options['cs']:
        print("Creating GAN")
        nz = options['nz']
        netG = gan.Generator32(1, nz, 64, 3)    # ngpu, nz, ngf, nc
        netD = gan.Discriminator32(1, 3, 64)    # ngpu, nc, ndf
        fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
        criterionD = nn.BCELoss()

    # Loss
    Loss = importlib.import_module('loss.' + options['loss'])
    criterion = getattr(Loss, options['loss'])(**options)

    if use_gpu:
        net = nn.DataParallel(net, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
        criterion = criterion.cuda()
        if options['cs']:
            netG = nn.DataParallel(netG, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
            netD = nn.DataParallel(netD, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
            fixed_noise = fixed_noise.cuda()  # .cuda() is not in-place on tensors

    model_path = os.path.join(options['outf'], 'models', options['dataset'])
    file_name = '{}_{}_{}_{}_{}'.format(options['model'], options['dataset'], options['loss'],
                                        str(options['weight_pl']), str(options['cs']))

    if options['eval']:
        net, criterion = load_networks(net, model_path, file_name, criterion=criterion)
        results = test(net, criterion, testloader, outloader, epoch=0, **options)
        print("Acc (%): {:.3f}\t AUROC (%): {:.3f}\t OSCR (%): {:.3f}\t".format(
            results['ACC'], results['AUROC'], results['OSCR']))
        return

    params_list = [{'params': net.parameters()}, {'params': criterion.parameters()}]
    optimizer = torch.optim.Adam(params_list, lr=options['lr'])
    if options['cs']:
        optimizerD = torch.optim.Adam(netD.parameters(), lr=options['gan_lr'], betas=(0.5, 0.999))
        optimizerG = torch.optim.Adam(netG.parameters(), lr=options['gan_lr'], betas=(0.5, 0.999))

    if options['stepsize'] > 0:
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60, 90, 120])

    start_time = time.time()
    score_now = 0.0
    for epoch in range(options['max_epoch']):
        print("==> Epoch {}/{}".format(epoch + 1, options['max_epoch']))
        if options['cs']:
            train_cs(net, netD, netG, criterion, criterionD,
                     optimizer, optimizerD, optimizerG, trainloader, epoch=epoch, **options)
        train(net, criterion, optimizer, trainloader, epoch=epoch, **options)

        if options['eval_freq'] > 0 and (epoch + 1) % options['eval_freq'] == 0 or (epoch + 1) == options['max_epoch']:
            print("==> Test")
            results = test(net, criterion, testloader, outloader, epoch=epoch, **options)
            print("Acc (%): {:.3f}\t AUROC (%): {:.3f}\t OSCR (%): {:.3f}\t".format(
                results['ACC'], results['AUROC'], results['OSCR']))
            save_networks(net, model_path, file_name, criterion=criterion)
            if options['cs']:
                save_GAN(netG, netD, model_path, file_name)
                fake = netG(fixed_noise)
                GAN_path = os.path.join(model_path, 'samples')
                mkdir_if_missing(GAN_path)
                vutils.save_image(fake.data, '%s/gan_samples_epoch_%03d.png' % (GAN_path, epoch),
                                  normalize=True)

        if options['stepsize'] > 0:
            scheduler.step()

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def main(config):
    # Init loaders and base.
    loaders = Loaders(config)
    base = Base(config, loaders)

    # Make directories.
    make_dirs(base.output_path)
    make_dirs(base.save_model_path)
    make_dirs(base.save_logs_path)
    make_dirs(base.save_visualize_market_path)
    make_dirs(base.save_visualize_duke_path)

    # Init logger.
    logger = Logger(os.path.join(os.path.join(config.output_path, 'logs/'), 'log.txt'))
    logger('\n' * 3)
    logger(config)

    if config.mode == 'train':  # train mode
        # Resume the model from resume_train_epoch if requested.
        if config.resume_train_epoch >= 0:
            base.resume_model(config.resume_train_epoch)
            start_train_epoch = config.resume_train_epoch
        else:
            start_train_epoch = 0

        # Automatically resume the model from the latest checkpoint.
        if config.auto_resume_training_from_lastest_steps:
            root, _, files = os_walk(base.save_model_path)
            if len(files) > 0:
                # Get indexes of saved models.
                indexes = []
                for file in files:
                    indexes.append(int(file.replace('.pkl', '').split('_')[-1]))
                indexes = sorted(list(set(indexes)), reverse=False)
                # Resume from the latest model.
                base.resume_model(indexes[-1])
                # start_train_epoch = indexes[-1]
                logger('Time: {}, automatically resume training from the latest step (model {})'
                       .format(time_now(), indexes[-1]))

        # Main loop.
        for current_epoch in range(start_train_epoch, config.total_train_epochs):
            # Save model.
            base.save_model(current_epoch)
            # Train.
            base.lr_scheduler.step(current_epoch)
            _, results = train_an_epoch(config, base, loaders)
            logger('Time: {}; Epoch: {}; {}'.format(time_now(), current_epoch, results))
            # Test every 40 epochs.
            if (current_epoch + 1) % 40 == 0:
                market_map, market_rank = test(config, base, loaders, 'market')
                duke_map, duke_rank = test(config, base, loaders, 'duke')
                logger('Time: {}, Dataset: Market \nmAP: {} \nRank: {}'.format(
                    time_now(), market_map, market_rank))
                logger('Time: {}, Dataset: Duke \nmAP: {} \nRank: {}'.format(
                    time_now(), duke_map, duke_rank))
                logger('')

    elif config.mode == 'test':  # test mode
        # Resume from resume_test_epoch.
        if config.resume_test_epoch >= 0:
            base.resume_model(config.resume_test_epoch)
        # Test.
        market_map, market_rank = test(config, base, loaders, 'market')
        duke_map, duke_rank = test(config, base, loaders, 'duke')
        logger('Time: {}, Dataset: Market \nmAP: {} \nRank: {}'.format(
            time_now(), market_map, market_rank))
        logger('Time: {}, Dataset: Duke \nmAP: {} \nRank: {}'.format(
            time_now(), duke_map, duke_rank))
        logger('')

    elif config.mode == 'visualize':  # visualization mode
        # Resume from resume_visualize_epoch.
        if config.resume_visualize_epoch >= 0:
            base.resume_model(config.resume_visualize_epoch)
        # Visualization.
        visualize_ranking_list(config, base, loaders, 'market')
        visualize_ranking_list(config, base, loaders, 'duke')
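# Hedged entry-point sketch: how a main(config) like the ones above is
# usually driven. The flags are assumptions modeled on the config fields the
# function reads, not the project's actual argparse setup, and only a subset
# of the fields is shown.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train',
                        choices=['train', 'test', 'visualize'])
    parser.add_argument('--output_path', type=str, default='out/')
    parser.add_argument('--total_train_epochs', type=int, default=120)
    parser.add_argument('--resume_train_epoch', type=int, default=-1)
    parser.add_argument('--resume_test_epoch', type=int, default=-1)
    parser.add_argument('--resume_visualize_epoch', type=int, default=-1)
    parser.add_argument('--auto_resume_training_from_lastest_steps',
                        action='store_true',
                        help='resume from the latest saved checkpoint')
    config = parser.parse_args()
    main(config)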
        'pascalvoc12', transform=transform, target_transform=target_transform,
        custom_type='B')  # fragment: the call that builds t2 is truncated above
    tasks.append(t1)
    tasks.append(t2)

    prev_tasks = []
    task_loss_after_each_train = []
    if opt.pretrained:
        model = load_pretrained(opt.load_pretrained)
        start_index = opt.pretrained
        test_results = test(tasks, model, opt, visualizer=visualizer,
                            limit_updates=opt.limit_test_updates)
        # for i in range(0, start_index):
        #     test_results = test(tasks, model, opt)
        #     task_loss_after_each_train.append(test_results)
        #     prev_tasks.append(tasks[i])
    else:
        t = tasks[0]
        model = networks.__dict__[opt.model](t.name, t.type, t.num_classes,
                                             encoder=opt.encoder, decoder=opt.decoder,
                                             setting=opt.model_setting)
    for t in tasks: