def main():
    """Train a fixed (searched) architecture from scratch on the chosen dataset.

    Relies on module-level globals: ``args``, ``CLASSES``, ``genotypes``,
    ``utils`` and the ``train``/``infer`` helpers.  Exits with status 1 when
    no GPU is available, since training is CUDA-only.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Seed numpy, CPU and CUDA RNGs for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Look the genotype up by name.  getattr() replaces the original
    # eval("genotypes.%s" % args.arch): same result for valid names, but it
    # cannot execute arbitrary code supplied on the command line.
    genotype = getattr(genotypes, args.arch)
    if args.dataset in utils.LARGE_DATASETS:
        model = NetworkLarge(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    else:
        model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    train_transform, valid_transform = utils.data_transforms(args.dataset, args.cutout, args.cutout_length)
    if args.dataset == "CIFAR100":
        train_data = dset.CIFAR100(root=args.datapath, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.datapath, train=False, download=True, transform=valid_transform)
    elif args.dataset == "CIFAR10":
        train_data = dset.CIFAR10(root=args.datapath, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.datapath, train=False, download=True, transform=valid_transform)
    elif args.dataset == 'MIT67':
        dset_cls = dset.ImageFolder
        data_path = '%s/MIT67/train' % args.datapath  # 'data/MIT67/train'
        val_path = '%s/MIT67/test' % args.datapath  # 'data/MIT67/val'
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == 'Sport8':
        dset_cls = dset.ImageFolder
        data_path = '%s/Sport8/train' % args.datapath  # 'data/Sport8/train'
        val_path = '%s/Sport8/test' % args.datapath  # 'data/Sport8/val'
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == "flowers102":
        dset_cls = dset.ImageFolder
        data_path = '%s/flowers102/train' % args.datapath
        val_path = '%s/flowers102/test' % args.datapath
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    else:
        # Fail fast with a clear message instead of a NameError on
        # train_data further down when the dataset name is unknown.
        raise ValueError('unsupported dataset: %s' % args.dataset)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        # NOTE(review): stepping the scheduler before training preserves the
        # original schedule; PyTorch >= 1.1 recommends stepping after
        # optimizer.step(), so the very first lr value here is already index 1.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp the drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        if valid_acc > best_acc:
            best_acc = valid_acc
        logging.info('valid_acc %f, best_acc %f', valid_acc, best_acc)
        # Weights are saved every epoch (not only on improvement).
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train a searched architecture while logging GPU stats and metrics to TensorBoard.

    Uses module-level globals: ``args``, ``unparsed``, ``writer``, ``utils``,
    ``genotypes`` and the ``train``/``infer`` helpers.
    """
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    # Seed numpy and torch RNGs for reproducibility.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed args = %s", unparsed)

    num_gpus = torch.cuda.device_count()
    # Background thread writing GPU utilisation to TensorBoard; poll fast in
    # test mode, every 5 minutes otherwise.
    gpu_logger = GpuLogThread(list(range(num_gpus)), writer, seconds=10 if args.test else 300)
    gpu_logger.start()
    try:
        genotype = genotypes.load_genotype(args.arch, skip_cons=args.arch_pref_sc)
        print('---------Genotype---------')
        logging.info(genotype)
        print('--------------------------')

        # Number of output classes per supported dataset.
        if args.dataset == "CIFAR100":
            CLASSES = 100
        elif args.dataset == "CIFAR10":
            CLASSES = 10
        elif args.dataset == 'MIT67':
            dset_cls = dset.ImageFolder
            CLASSES = 67
        elif args.dataset == 'Sport8':
            dset_cls = dset.ImageFolder
            CLASSES = 8
        elif args.dataset == "flowers102":
            dset_cls = dset.ImageFolder
            CLASSES = 102
        else:
            # Previously an unknown dataset surfaced as a NameError on
            # CLASSES; fail fast with an actionable message instead.
            raise ValueError('unsupported dataset: %s' % args.dataset)

        if args.dataset in utils.LARGE_DATASETS:
            model = NetworkLarge(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
        else:
            model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
        if num_gpus > 1:
            model = nn.DataParallel(model)
        model = model.cuda()
        logging.info("param count = %d", utils.count_parameters(model))
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

        criterion = nn.CrossEntropyLoss()
        criterion = criterion.cuda()
        optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
                                    momentum=args.momentum, weight_decay=args.weight_decay)

        train_transform, valid_transform = utils.data_transforms(
            args.dataset, args.cutout, args.cutout_length)
        if args.dataset == "CIFAR100":
            train_data = dset.CIFAR100(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
            valid_data = dset.CIFAR100(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
        elif args.dataset == "CIFAR10":
            train_data = dset.CIFAR10(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
            valid_data = dset.CIFAR10(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
        elif args.dataset == 'MIT67':
            dset_cls = dset.ImageFolder
            data_path = '%s/MIT67/train' % args.tmp_data_dir  # 'data/MIT67/train'
            val_path = '%s/MIT67/test' % args.tmp_data_dir  # 'data/MIT67/val'
            train_data = dset_cls(root=data_path, transform=train_transform)
            valid_data = dset_cls(root=val_path, transform=valid_transform)
        elif args.dataset == 'Sport8':
            dset_cls = dset.ImageFolder
            data_path = '%s/Sport8/train' % args.tmp_data_dir  # 'data/Sport8/train'
            val_path = '%s/Sport8/test' % args.tmp_data_dir  # 'data/Sport8/val'
            train_data = dset_cls(root=data_path, transform=train_transform)
            valid_data = dset_cls(root=val_path, transform=valid_transform)
        elif args.dataset == "flowers102":
            dset_cls = dset.ImageFolder
            data_path = '%s/flowers102/train' % args.tmp_data_dir
            val_path = '%s/flowers102/test' % args.tmp_data_dir
            train_data = dset_cls(root=data_path, transform=train_transform)
            valid_data = dset_cls(root=val_path, transform=valid_transform)

        # DynamicBatchSizeLoader serves batches assembled from
        # batch_multiples-sized DataLoader chunks, starting at batch_size_min.
        train_iterator = utils.DynamicBatchSizeLoader(
            torch.utils.data.DataLoader(train_data, batch_size=args.batch_multiples,
                                        shuffle=True, pin_memory=True,
                                        num_workers=args.workers),
            args.batch_size_min)
        test_iterator = utils.DynamicBatchSizeLoader(
            torch.utils.data.DataLoader(valid_data, batch_size=args.batch_multiples,
                                        shuffle=False, pin_memory=True,
                                        num_workers=args.workers),
            args.batch_size_min)

        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs))
        best_acc = 0.0
        for epoch in range(args.epochs):
            lr = scheduler.get_lr()[0]
            drop_path_prob = args.drop_path_prob * epoch / args.epochs
            logging.info('Epoch: %d lr %e', epoch, lr)
            # DataParallel wraps the model, so drop_path_prob lives on .module.
            if num_gpus > 1:
                model.module.drop_path_prob = drop_path_prob
            else:
                model.drop_path_prob = drop_path_prob

            epoch_start_time = time.time()
            train_acc, train_obj = train(train_iterator, test_iterator, model,
                                         criterion, optimizer, gpu_logger)
            logging.info('Train_acc: %f', train_acc)
            test_acc, test_obj = infer(test_iterator, model, criterion)
            if test_acc > best_acc:
                best_acc = test_acc
            logging.info('Valid_acc: %f', test_acc)
            epoch_duration = time.time() - epoch_start_time
            utils.save(model, os.path.join(args.save, 'weights.pt'))

            # log info
            print('Epoch time: %ds.' % epoch_duration)
            writer.add_scalar('epoch/lr', lr, epoch)
            writer.add_scalar('epoch/drop_path_prob', drop_path_prob, epoch)
            writer.add_scalar('epoch/seconds', epoch_duration, epoch)
            writer.add_scalar('epoch/batch_size', train_iterator.batch_size, epoch)
            writer.add_scalar('train/accuracy', train_acc, epoch)
            writer.add_scalar('train/loss', train_obj, epoch)
            writer.add_scalar('test/accuracy', test_acc, epoch)
            writer.add_scalar('test/loss', test_obj, epoch)
            scheduler.step()
    finally:
        # Always stop the logger thread — previously it only stopped on a
        # clean run, which could keep the process alive after a crash.
        gpu_logger.stop()
def main():
    """Entry point for (optionally distributed / fp16) final-model training.

    Side effects on module globals: rebinds ``args`` (via
    utils.initialize_files_and_args) and ``logger``, and updates
    ``best_top1`` as training progresses.
    """
    global best_top1, args, logger
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        # torch.distributed.launch exports WORLD_SIZE for multi-process runs.
        args.distributed = int(os.environ['WORLD_SIZE']) > 1

    # commented because it is now set as an argparse param.
    # args.gpu = 0
    args.world_size = 1
    if args.distributed:
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    # note the gpu is used for directory creation and log files
    # which is needed when run as multiple processes
    args = utils.initialize_files_and_args(args)
    logger = utils.logging_setup(args.log_file_path)

    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
    if args.static_loss_scale != 1.0:
        if not args.fp16:
            logger.info(
                "Warning: if --fp16 is not used, static_loss_scale will be ignored."
            )

    # load the correct ops dictionary
    # NOTE(review): eval() on strings built from CLI args executes arbitrary
    # code; acceptable only because ops/primitives/arch are trusted,
    # developer-supplied names.
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    op_dict = eval(op_dict_to_load)

    # load the correct primitives list
    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))
    # create model
    genotype = eval("genotypes.%s" % args.arch)
    # get the number of output channels
    classes = dataset.class_dict[args.dataset]
    # create the neural network
    if args.dataset == 'imagenet':
        model = NetworkImageNet(args.init_channels, classes, args.layers,
                                args.auxiliary, genotype, op_dict=op_dict,
                                C_mid=args.mid_channels)
        flops_shape = [1, 3, 224, 224]
    else:
        model = NetworkCIFAR(args.init_channels, classes, args.layers,
                             args.auxiliary, genotype, op_dict=op_dict,
                             C_mid=args.mid_channels)
        flops_shape = [1, 3, 32, 32]
    model.drop_path_prob = 0.0

    if args.flops:
        # Report parameter count and FLOPs, then exit without training.
        model = model.cuda()
        logger.info("param size = %fMB", utils.count_parameters_in_MB(model))
        logger.info("flops_shape = " + str(flops_shape))
        logger.info("flops = " + utils.count_model_flops(model, data_shape=flops_shape))
        return

    if args.sync_bn:
        import apex
        logger.info("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)

    model = model.cuda()
    if args.fp16:
        model = network_to_half(model)
    if args.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication with
        # computation in the backward pass.
        # model = DDP(model)
        # delay_allreduce delays all communication to the end of the backward pass.
        model = DDP(model, delay_allreduce=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    # Scale learning rate based on global batch size
    args.learning_rate = args.learning_rate * float(
        args.batch_size * args.world_size) / 256.
    init_lr = args.learning_rate / args.warmup_lr_divisor
    optimizer = torch.optim.SGD(model.parameters(), init_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.fp16:
        optimizer = FP16_Optimizer(optimizer,
                                   static_loss_scale=args.static_loss_scale,
                                   dynamic_loss_scale=args.dynamic_loss_scale)

    # Optionally resume from a checkpoint
    if args.resume or args.evaluate:
        if args.evaluate:
            args.resume = args.evaluate

        # Use a local scope to avoid dangling references
        def resume():
            # BUGFIX: without this declaration the assignment below bound a
            # *local* best_top1 and the restored best accuracy was silently
            # discarded (main() declares best_top1 as a module global).
            global best_top1
            if os.path.isfile(args.resume):
                logger.info("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(
                    args.resume,
                    map_location=lambda storage, loc: storage.cuda(args.gpu))
                args.start_epoch = checkpoint['epoch']
                if 'best_top1' in checkpoint:
                    best_top1 = checkpoint['best_top1']
                model.load_state_dict(checkpoint['state_dict'])
                # An FP16_Optimizer instance's state dict internally stashes the master params.
                optimizer.load_state_dict(checkpoint['optimizer'])
                if 'lr_scheduler' in checkpoint:
                    # BUGFIX: no torch scheduler is constructed in this script
                    # (cosine_power_annealing drives the lr instead), so the
                    # original scheduler.load_state_dict here raised a
                    # NameError; warn and continue instead.
                    logger.info("=> checkpoint contains an 'lr_scheduler' state "
                                "this script does not use; ignoring it.")
                # NOTE: a stored 'lr_schedule' is intentionally not restored —
                # the schedule is recomputed from args below.
                logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
            else:
                logger.info("=> no checkpoint found at '{}'".format(
                    args.resume))

        resume()

    # Get preprocessing functions (i.e. transforms) to apply on data
    # normalize_as_tensor = False because we normalize and convert to a
    # tensor in our custom prefetching function, rather than as part of
    # the transform preprocessing list.
    train_transform, valid_transform = utils.get_data_transforms(
        args, normalize_as_tensor=False)
    # Get the training queue, select training and validation from training set
    train_loader, val_loader = dataset.get_training_queues(
        args.dataset, train_transform, valid_transform, args.data,
        args.batch_size, train_proportion=1.0, collate_fn=fast_collate,
        distributed=args.distributed, num_workers=args.workers)

    if args.evaluate:
        if args.dataset == 'cifar10':
            # evaluate best model weights on cifar 10.1
            # https://github.com/modestyachts/CIFAR-10.1
            train_transform, valid_transform = utils.get_data_transforms(args)
            # Get the training queue, use full training and test set
            train_queue, valid_queue = dataset.get_training_queues(
                args.dataset, train_transform, valid_transform, args.data,
                args.batch_size, train_proportion=1.0,
                search_architecture=False)
            test_data = cifar10_1.CIFAR10_1(root=args.data, download=True,
                                            transform=valid_transform)
            test_queue = torch.utils.data.DataLoader(
                test_data, batch_size=args.batch_size, shuffle=False,
                pin_memory=True, num_workers=args.workers)
            eval_stats = evaluate(args, model, criterion,
                                  train_queue=train_queue,
                                  valid_queue=valid_queue,
                                  test_queue=test_queue)
            with open(args.stats_file, 'w') as f:
                # TODO(ahundt) fix "TypeError: 1869 is not JSON serializable" to include arg info, see train.py
                json.dump(eval_stats, f)
            logger.info("flops = " + utils.count_model_flops(model))
            logger.info(utils.dict_to_log_string(eval_stats))
            logger.info('\nEvaluation of Loaded Model Complete! Save dir: ' + str(args.save))
        else:
            validate(val_loader, model, criterion, args)
        return

    # Power-cosine lr schedule precomputed for every epoch (incl. warmup).
    lr_schedule = cosine_power_annealing(
        epochs=args.epochs, max_lr=args.learning_rate,
        min_lr=args.learning_rate_min, warmup_epochs=args.warmup_epochs,
        exponent_order=args.lr_power_annealing_exponent_order,
        restart_lr=args.restart_lr)
    epochs = np.arange(args.epochs) + args.start_epoch
    stats_csv = args.epoch_stats_file
    stats_csv = stats_csv.replace('.json', '.csv')
    with tqdm(epochs, dynamic_ncols=True, disable=args.local_rank != 0,
              leave=False) as prog_epoch:
        best_stats = {}
        stats = {}
        epoch_stats = []
        best_epoch = 0
        for epoch, learning_rate in zip(prog_epoch, lr_schedule):
            if args.distributed and train_loader.sampler is not None:
                train_loader.sampler.set_epoch(int(epoch))
            # update the learning rate from the precomputed schedule
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            # linearly ramp drop-path probability over training
            model.drop_path_prob = args.drop_path_prob * float(epoch) / float(
                args.epochs)
            # train for one epoch
            train_stats = train(train_loader, model, criterion, optimizer,
                                int(epoch), args)
            if args.prof:
                break
            # evaluate on validation set
            top1, val_stats = validate(val_loader, model, criterion, args)
            stats.update(train_stats)
            stats.update(val_stats)
            stats['lr'] = '{0:.5f}'.format(learning_rate)
            stats['epoch'] = epoch

            # remember best top1 and save checkpoint (rank 0 only)
            if args.local_rank == 0:
                is_best = top1 > best_top1
                best_top1 = max(top1, best_top1)
                stats['best_top1'] = '{0:.3f}'.format(best_top1)
                if is_best:
                    best_epoch = epoch
                    best_stats = copy.deepcopy(stats)
                stats['best_epoch'] = best_epoch
                stats_str = utils.dict_to_log_string(stats)
                logger.info(stats_str)
                save_checkpoint(
                    {
                        'epoch': epoch,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_top1': best_top1,
                        'optimizer': optimizer.state_dict(),
                        # 'lr_scheduler': scheduler.state_dict()
                        'lr_schedule': lr_schedule,
                        'stats': best_stats
                    },
                    is_best, path=args.save)
                prog_epoch.set_description(
                    'Overview ***** best_epoch: {0} best_valid_top1: {1:.2f} ***** Progress'
                    .format(best_epoch, best_top1))
                epoch_stats += [copy.deepcopy(stats)]
                with open(args.epoch_stats_file, 'w') as f:
                    json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
                utils.list_of_dicts_to_csv(stats_csv, epoch_stats)

    # Final summary: best stats, full arg dump, and per-epoch history.
    stats_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
    logger.info(stats_str)
    with open(args.stats_file, 'w') as f:
        arg_dict = vars(args)
        arg_dict.update(best_stats)
        json.dump(arg_dict, f, cls=utils.NumpyEncoder)
    with open(args.epoch_stats_file, 'w') as f:
        json.dump(epoch_stats, f, cls=utils.NumpyEncoder)
    utils.list_of_dicts_to_csv(stats_csv, epoch_stats)
    logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
def main():
    """Train a fixed genotype on one of the supported image datasets.

    Uses module globals: ``args``, ``unparsed``, ``CLASSES``, ``genotypes``,
    ``utils`` and the ``train``/``infer`` helpers.
    """
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    # Seed numpy / torch RNGs for reproducibility.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed args = %s", unparsed)
    num_gpus = torch.cuda.device_count()

    # getattr replaces eval("genotypes.%s" % args.arch): same attribute
    # lookup without evaluating arbitrary command-line text.
    genotype = getattr(genotypes, args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')

    if args.dataset in utils.LARGE_DATASETS:
        model = NetworkLarge(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    else:
        model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if num_gpus > 1:
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    train_transform, valid_transform = utils.data_transforms(args.dataset, args.cutout, args.cutout_length)
    if args.dataset == "CIFAR100":
        train_data = dset.CIFAR100(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
    elif args.dataset == "CIFAR10":
        train_data = dset.CIFAR10(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
    elif args.dataset == 'mit67':
        dset_cls = dset.ImageFolder
        data_path = '%s/MIT67/train' % args.tmp_data_dir
        val_path = '%s/MIT67/test' % args.tmp_data_dir
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == 'sport8':
        dset_cls = dset.ImageFolder
        data_path = '%s/Sport8/train' % args.tmp_data_dir
        val_path = '%s/Sport8/test' % args.tmp_data_dir
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    elif args.dataset == "flowers102":
        dset_cls = dset.ImageFolder
        data_path = '%s/flowers102/train' % args.tmp_data_dir
        val_path = '%s/flowers102/test' % args.tmp_data_dir
        train_data = dset_cls(root=data_path, transform=train_transform)
        valid_data = dset_cls(root=val_path, transform=valid_transform)
    else:
        # Fail fast instead of hitting a NameError on train_data below.
        raise ValueError('unsupported dataset: %s' % args.dataset)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        # NOTE(review): stepping before training preserves the original
        # schedule; PyTorch >= 1.1 recommends stepping after optimizer.step().
        scheduler.step()
        logging.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # DataParallel wraps the model, so drop_path_prob lives on .module.
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        start_time = time.time()
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('Train_acc: %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        if valid_acc > best_acc:
            best_acc = valid_acc
        logging.info('Valid_acc: %f', valid_acc)
        end_time = time.time()
        duration = end_time - start_time
        print('Epoch time: %ds.' % duration)
        # Weights saved every epoch (not only on improvement).
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train an ImageNet final model (single node, optional DataParallel).

    Uses module globals: ``args``, ``logger``, ``CLASSES``, ``utils``,
    ``train`` (module with train/infer), ``autoaugment``, ``genotypes``,
    ``operations``.  Exits with status 1 when no GPU is available.
    """
    if not torch.cuda.is_available():
        logger.info('no gpu device available')
        sys.exit(1)
    # Seed numpy, CPU and CUDA RNGs for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logger.info('gpu device = %d' % args.gpu)
    logger.info("args = %s", args)

    # load the correct ops dictionary
    # NOTE(review): eval() on CLI-derived strings is trusted-input only.
    op_dict_to_load = "operations.%s" % args.ops
    logger.info('loading op dict: ' + str(op_dict_to_load))
    op_dict = eval(op_dict_to_load)

    # load the correct primitives list
    primitives_to_load = "genotypes.%s" % args.primitives
    logger.info('loading primitives:' + primitives_to_load)
    primitives = eval(primitives_to_load)
    logger.info('primitives: ' + str(primitives))
    genotype = eval("genotypes.%s" % args.arch)

    cnn_model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                        genotype, op_dict=op_dict, C_mid=args.mid_channels)
    if args.parallel:
        cnn_model = nn.DataParallel(cnn_model).cuda()
    else:
        cnn_model = cnn_model.cuda()
    logger.info("param size = %fMB", utils.count_parameters_in_MB(cnn_model))

    if args.flops:
        # Report FLOPs and stop; sys.exit replaces the builtin exit(), which
        # is provided by the site module and not guaranteed to exist.
        cnn_model.drop_path_prob = 0.0
        logger.info("flops = " + utils.count_model_flops(cnn_model, data_shape=[1, 3, 224, 224]))
        sys.exit(1)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(
        cnn_model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            autoaugment.ImageNetPolicy(),
            # transforms.ColorJitter(
            #     brightness=0.4,
            #     contrast=0.4,
            #     saturation=0.4,
            #     hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=8)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

    prog_epoch = tqdm(range(args.epochs), dynamic_ncols=True)
    best_valid_acc = 0.0
    best_epoch = 0
    best_stats = {}
    best_acc_top1 = 0
    weights_file = os.path.join(args.save, 'weights.pt')
    for epoch in prog_epoch:
        scheduler.step()
        cnn_model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train.train(args, train_queue, cnn_model, criterion, optimizer)
        stats = train.infer(args, valid_queue, cnn_model, criterion)
        is_best = False
        if stats['valid_acc'] > best_valid_acc:
            # new best epoch, save weights
            utils.save(cnn_model, weights_file)
            best_epoch = epoch
            best_valid_acc = stats['valid_acc']
            best_stats = stats
            best_stats['lr'] = scheduler.get_lr()[0]
            best_stats['epoch'] = best_epoch
            best_train_loss = train_obj
            best_train_acc = train_acc
            is_best = True
        logger.info('epoch, %d, train_acc, %f, valid_acc, %f, train_loss, %f, valid_loss, %f, lr, %e, best_epoch, %d, best_valid_acc, %f, ' + utils.dict_to_log_string(stats),
                    epoch, train_acc, stats['valid_acc'], train_obj,
                    stats['valid_loss'], scheduler.get_lr()[0], best_epoch,
                    best_valid_acc)
        checkpoint = {
            'epoch': epoch,
            'state_dict': cnn_model.state_dict(),
            'best_acc_top1': best_valid_acc,
            'optimizer': optimizer.state_dict(),
        }
        checkpoint.update(stats)
        # BUGFIX: the assembled checkpoint dict (with model and optimizer
        # state) was built but never saved — the original passed `stats`,
        # so resuming from 'checkpoint' lost all weights/optimizer state.
        utils.save_checkpoint(checkpoint, is_best, args.save)

    best_epoch_str = utils.dict_to_log_string(best_stats, key_prepend='best_')
    logger.info(best_epoch_str)
    logger.info('Training of Final Model Complete! Save dir: ' + str(args.save))
def main():
    # Horovod-based distributed ImageNet training of a fixed genotype.
    # Rank 0 handles all logging, metric files and checkpointing; model and
    # optimizer state are broadcast from rank 0 to the other workers.
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    print(torch.cuda.device_count())
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    # Only rank 0 logs, to avoid duplicated output across workers.
    if hvd.rank() == 0:
        logging.info('gpu device = %d' % args.gpu)
        logging.info("args = %s", args)
    best_acc_top1 = 0
    start_epoch = 0
    if args.resume:
        # Recover the epoch counter and best accuracy here; the actual
        # weights/optimizer state are reloaded later (rank 0 only) and then
        # broadcast to every worker.
        checkpoint = torch.load(os.path.join(args.save, 'checkpoint.pth.tar'))
        best_checkpoint = torch.load(
            os.path.join(args.save, 'model_best.pth.tar'))
        start_epoch = checkpoint['epoch']
        best_acc_top1 = best_checkpoint['best_acc_top1']
    # Collective ops: every rank must call these so all workers agree on the
    # resume state held by rank 0.
    start_epoch = hvd.broadcast(torch.tensor(start_epoch), root_rank=0,
                                name='start_epoch').item()
    best_acc_top1 = hvd.broadcast(torch.tensor(best_acc_top1), root_rank=0,
                                  name='best_acc_top1').item()
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    if hvd.rank() == 0:
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Label-smoothed loss is used for training; plain CE for validation.
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    # Linear lr scaling with the number of Horovod workers.
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate * hvd.size(),
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # ***************** horovod *******************
    optimizer = hvd.DistributedOptimizer(
        optimizer, named_parameters=model.named_parameters())
    # ***************** horovod *******************
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    # Shard the training set across workers; validation is not sharded, so
    # every rank evaluates the full validation set.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_data, num_replicas=hvd.size(), rank=hvd.rank())
    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              pin_memory=True,
                                              num_workers=args.num_workers,
                                              sampler=train_sampler)
    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=args.num_workers)
    if start_epoch > 0 and hvd.rank() == 0:
        # Rank 0 reloads model/optimizer state; the broadcasts just below
        # propagate it to all other workers.
        checkpoint = torch.load(os.path.join(args.save, 'checkpoint.pth.tar'))
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("checkpoint {}, model, optimizer was loaded".format(start_epoch))
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(optimizer, root_rank=0)
    if not args.resume:
        # Initialize the lr schedule before the first epoch; presumably
        # set_lr implements warmup/step logic per args.scheduler — confirm
        # against its definition.
        set_lr(0, 0, len(train_queue), optimizer, args.scheduler)
    for epoch in range(start_epoch, args.epochs + args.warmup_epochs):
        if hvd.rank() == 0:
            lr = optimizer.param_groups[0]['lr']
            logging.info('epoch %d lr %e', epoch, lr)
            with open(os.path.join(args.save, 'learning_rate.txt'),
                      mode='a') as f:
                f.write(str(lr) + '\n')
        # drop_path_prob lives on .module when wrapped in DataParallel.
        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        # Re-sync parameters from rank 0 at every epoch boundary.
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        train_acc, train_obj = train(train_queue, train_sampler, model,
                                     criterion_smooth, optimizer, epoch)
        if hvd.rank() == 0:
            logging.info('train_acc %f', train_acc)
            with open(os.path.join(args.save, "train_acc.txt"), mode='a') as f:
                f.write(str(train_acc) + '\n')
            with open(os.path.join(args.save, "train_loss.txt"), mode='a') as f:
                f.write(str(train_obj) + '\n')
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        if hvd.rank() == 0:
            logging.info('valid_acc_top1 %f', valid_acc_top1)
            logging.info('valid_acc_top5 %f', valid_acc_top5)
            with open(os.path.join(args.save, "test_acc_1.txt"), mode='a') as f:
                f.write(str(valid_acc_top1) + '\n')
            with open(os.path.join(args.save, "test_acc_5.txt"), mode='a') as f:
                f.write(str(valid_acc_top5) + '\n')
            with open(os.path.join(args.save, "test_loss.txt"), mode='a') as f:
                f.write(str(valid_obj) + '\n')
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        if hvd.rank() == 0:
            utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.save)
def main():
    """Train an ImageNet model built from a searched genotype.

    Resumes from the newest ``weights_<epoch>.pt`` in ``<base_path>/ImageNet``
    (or from ``args.load_file``) unless ``args.from_scratch`` is set.
    """
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)
    num_gpus = torch.cuda.device_count()
    # exist_ok avoids the check-then-create race of the old exists()/makedirs pair
    os.makedirs(args.base_path, exist_ok=True)
    ckpt_dir = os.path.join(args.base_path, "ImageNet")
    os.makedirs(ckpt_dir, exist_ok=True)
    # Genotype comes either from the registry (by name) or from a search-result file.
    if args.arch is not None:
        genotype = eval("genotypes.%s" % args.arch)
    elif args.base_path is not None and args.genotype_name is not None:
        genotype_path = os.path.join(args.base_path, 'results_of_7q/genotype')
        genotype = get_genotype(genotype_path, args.genotype_name)
    else:
        raise ValueError("the parser input arch, genotype_path, genotype_name should not be all None")
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # label-smoothed loss for training, plain cross-entropy for validation
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    traindir = os.path.join(args.data, 'ILSVRC2012_img_train')
    validdir = os.path.join(args.data, 'ILSVRC2012_img_val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
    best_acc_top1 = 0
    best_acc_top5 = 0
    ckpt_file = None
    start_epoch = 0
    if not args.from_scratch:
        # check whether we have pre-trained models
        if args.load_file is not None and os.path.exists(args.load_file):
            ckpt_file = args.load_file
        else:
            # pick the newest 'weights_<epoch>.pt' in ckpt_dir; names with an
            # extra dot (accuracy-named files like weights_12.345.pt) are skipped
            files = os.listdir(ckpt_dir)
            for f in files:
                tmp = f.split('.')
                if len(tmp) > 2:
                    continue
                tmp = int(tmp[0].split('_')[1])
                if tmp > start_epoch:
                    start_epoch = tmp
                    ckpt_file = os.path.join(ckpt_dir, f)
    if ckpt_file is not None:
        logging.info('====== Load ckpt ======')
        logging.info("Loading from %s" % ckpt_file)
        # NOTE(review): start epoch is hard-coded to 249, overriding the epoch
        # recovered from the checkpoint file name above — confirm this is intended.
        start_epoch = 249
        logging.info("Training Start at %d" % start_epoch)
        sat = torch.load(ckpt_file)
        model.load_state_dict(sat)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    for epoch in range(start_epoch, args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        # linear LR warm-up for large global batches
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch, current_lr * (epoch + 1) / 5.0)
        # drop-path probability ramps linearly toward drop_path_prob
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('Train_acc: %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('Valid_acc_top1: %f', valid_acc_top1)
        logging.info('Valid_acc_top5: %f', valid_acc_top5)
        epoch_duration = time.time() - epoch_start
        logging.info('Epoch time: %ds.', epoch_duration)
        # save current epoch model, and remove previous model
        # (the dead `ckpt = {...}` dicts of the original were removed: they were
        # never saved anywhere; utils.save stores only the weights)
        _remove_quietly(os.path.join(ckpt_dir, 'weights_%d.pt' % (epoch - 1)))
        utils.save(model, os.path.join(ckpt_dir, 'weights_%d.pt' % (epoch)))
        if valid_acc_top1 > best_acc_top1:
            # replace the previous accuracy-named best snapshot
            _remove_quietly(os.path.join(ckpt_dir, 'weights_%.3f.pt' % (best_acc_top1)))
            utils.save(model, os.path.join(ckpt_dir, 'weights_%.3f.pt' % (valid_acc_top1)))
            best_acc_top1 = valid_acc_top1
            best_acc_top5 = valid_acc_top5
    print("the best acc: %f(Top1); %f(Top5)" % (best_acc_top1, best_acc_top5))


def _remove_quietly(path):
    """Delete *path*, ignoring only filesystem errors (e.g. file not found).

    Replaces the original bare ``except: pass``, which also swallowed
    KeyboardInterrupt and SystemExit.
    """
    try:
        os.remove(path)
    except OSError:
        pass
def main():
    # Train an ImageNet classifier; the architecture comes either from a
    # dotted model name (taowei model zoo) or a named entry in `genotypes`.
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    # torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    # logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    if '.' in args.arch:
        # dotted name -> load a prebuilt network instead of assembling one
        from taowei.torch2.models import load_network
        model = load_network(args.arch)
    else:
        genotype = eval("genotypes.%s" % args.arch)
        model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # label-smoothed loss is used for training, plain CE for validation
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
    # # evaluate first
    # args.epoch = -1
    # _unwrap_model(model).drop_path_prob = 0.0
    # # model.drop_path_prob = 0.0
    # valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        args.epoch = epoch  # keep a record of current epoch
        # NOTE(review): step(epoch) is the deprecated epoch-indexed form, and
        # it is called before training — the LR for epoch 0 is already stepped.
        scheduler.step(epoch)
        # logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # drop-path probability ramps linearly from 0 toward drop_path_prob;
        # _unwrap_model handles the DataParallel wrapper case
        _unwrap_model(
            model).drop_path_prob = args.drop_path_prob * epoch / args.epochs
        # model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        # logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        # logging.info('valid_acc_top1 %f', valid_acc_top1)
        # logging.info('valid_acc_top5 %f', valid_acc_top5)
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
def main():
    """Train an ImageNet model from a genotype, with residual-weight options
    and resume support via ``<save>/checkpoint.pth.tar``."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    #torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device num = %d' % args.ngpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype, args.residual_wei, args.shrink_channel)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
        #model = nn.parallel.DistributedDataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # label-smoothed loss for training, plain cross-entropy for validation
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    optimizer = torch.optim.SGD(
        #model.parameters(),
        # parameter groups so BN/bias can be excluded from weight decay
        utils.set_group_weight(model, args.bn_no_wd, args.bias_no_wd),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    resume = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(resume):
        print("=> loading checkpoint %s" % resume)
        #checkpoint = torch.load(resume)
        checkpoint = torch.load(resume, map_location=lambda storage, loc: storage.cuda(0))
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        # BUG FIX: the old code did `optimizer.state_dict()['state'] = ...`,
        # which mutates a fresh copy returned by state_dict() and silently
        # discards the saved momentum buffers.  Splice the saved per-parameter
        # state into the current settings and load it for real (param_groups
        # keep the CLI hyper-parameters, matching the original intent of not
        # restoring them).
        opt_sd = optimizer.state_dict()
        opt_sd['state'] = checkpoint['optimizer']['state']
        optimizer.load_state_dict(opt_sd)
        print('=> loaded checkpoint epoch %d' % args.start_epoch)
    if args.start_epoch >= args.epochs:
        print('training finished')
        sys.exit(0)
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(args.image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4,
                contrast=0.4,
                saturation=0.4,
                hue=0.1),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            # keep the usual 256/224 resize-to-crop ratio at any image size
            transforms.Resize(int((256.0 / 224) * args.image_size)),
            transforms.CenterCrop(args.image_size),
            transforms.ToTensor(),
            normalize,
        ]))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=nworker)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=nworker)
    best_acc_top1 = 0
    for epoch in range(args.start_epoch, args.epochs):
        if args.lr_strategy == 'cos':
            lr = utils.set_lr(optimizer, epoch, args.epochs, args.learning_rate)
        #elif args.lr_strategy == 'step':
        #    scheduler.step()
        #    lr = scheduler.get_lr()[0]
        else:
            # BUG FIX: previously an unknown strategy fell through with `lr`
            # unbound, crashing below with a NameError; fail loudly instead.
            raise ValueError('unsupported lr_strategy: %s' % args.lr_strategy)
        logging.info('epoch %d lr %e', epoch, lr)
        # drop-path probability ramps linearly toward drop_path_prob
        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer, epoch)
        logging.info('train_acc %f', train_acc)
        # Checkpoint every epoch before validating.  NOTE(review): the
        # 'best_acc_top1' field stores the *train* accuracy here — confirm
        # downstream consumers expect that.
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': train_acc,
            'optimizer': optimizer.state_dict(),
        }, False, args.save)
        #if epoch >= args.early_stop:
        #    break
        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
# Standalone evaluation: rebuild the searched network, load trained weights,
# and measure top-1/top-5 accuracy on the ImageNet validation split.
if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
cudnn.benchmark = True
cudnn.enabled = True
logging.info("args = %s", args)
logging.info('Training with config:')
logging.info(pprint.pformat(config))
genotype = eval("genotypes.%s" % args.arch)
model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
# fixed drop-path probability; no annealing since there is no training loop
model.drop_path_prob = args.drop_path_prob
model = nn.DataParallel(model)
model = model.cuda()
# NOTE(review): the state dict is loaded *after* DataParallel wrapping, so the
# file must contain 'module.'-prefixed keys — confirm against the saver.
model.load_state_dict(torch.load(args.model_path))
imagenet = imagenet_data.ImageNet12(
    trainFolder=os.path.join(args.data_path, 'train'),
    testFolder=os.path.join(args.data_path, 'val'),
    num_workers=config.data.num_workers,
    data_config=config.data)
valid_queue = imagenet.getTestLoader(config.data.batch_size)
# Trainer is used in inference-only mode: no train queue, no optimizer.
trainer = Trainer(None, valid_queue, None, config, args.report_freq)
with torch.no_grad():
    val_acc_top1, val_acc_top5, valid_obj, batch_time = trainer.infer(
        model)
def main():
    # Multi-process (NCCL) ImageNet training with an optional "warm start"
    # learning-rate phase; only rank 0 logs, writes tensorboard and saves.
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    rank, world_size = dist_util.dist_init(args.port, 'nccl')
    np.random.seed(args.seed)
    # torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    # logging.info('gpu device = %d' % args.gpu)
    if rank == 0:
        # experiment dir, file logging and tensorboard exist only on rank 0
        generate_date = str(datetime.now().date())
        utils.create_exp_dir(generate_date, args.save, scripts_to_save=glob.glob('*.py'))
        logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                            format=log_format, datefmt='%m/%d %I:%M:%S %p')
        fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
        fh.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(fh)
        logging.info("args = %s", args)
        logger = tensorboardX.SummaryWriter('./runs/eval_imagenet_{}_{}'.format(args.arch, args.remark))
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    if rank == 0:
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # label-smoothed loss for training, plain CE for validation
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    if args.warm_start:
        # starts at warm_start_lr/gamma so the first WarmStart step lands on
        # warm_start_lr — assumption, TODO confirm against utils.WarmStart
        lr = args.warm_start_lr / args.warm_start_gamma
    else:
        lr = args.learning_rate
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
            hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = get_dataset(traindir, os.path.join(args.data, 'meta/train.txt'),
                                train_transform)
    valid_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    valid_dataset = get_dataset(validdir, os.path.join(args.data, 'meta/val.txt'), valid_transform)
    # NOTE(review): the DistributedSampler loaders are commented out, so every
    # rank iterates the FULL dataset — confirm this duplication is intended.
    # train_queue = torch.utils.data.DataLoader(
    #     train_dataset, batch_size=args.batch_size // world_size, sampler=DistributedSampler(train_dataset),
    #     pin_memory=True, num_workers=4)
    # valid_queue = torch.utils.data.DataLoader(
    #     valid_dataset, batch_size=args.batch_size // world_size, sampler=DistributedSampler(valid_dataset),
    #     pin_memory=True, num_workers=4)
    train_queue = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_dataset, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=4)
    if args.warm_start:
        scheduler = utils.WarmStart(optimizer, gamma=args.warm_start_gamma)
    else:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        '''if epoch == 0 or epoch == 1:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.warm_up_learning_rate
        elif epoch == 2:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.learning_rate
        else:
            scheduler.step()'''
        scheduler.step()
        if rank == 0:
            logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # drop-path probability ramps linearly toward drop_path_prob
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer, rank)
        if rank == 0:
            logging.info('train_acc %f', train_acc)
            logger.add_scalar("epoch_train_acc", train_acc, epoch)
            logger.add_scalar("epoch_train_loss", train_obj, epoch)
        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion, rank)
        if rank == 0:
            logging.info('valid_acc_top1 %f', valid_acc_top1)
            logging.info('valid_acc_top5 %f', valid_acc_top5)
            logger.add_scalar("epoch_valid_acc_top1", valid_acc_top1, epoch)
            logger.add_scalar("epoch_valid_acc_top5", valid_acc_top5, epoch)
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        if args.warm_start:
            # if not is_best and not scheduler.lr_const:
            # NOTE(review): `if True` forces this best-checkpoint restore on
            # EVERY epoch while warm_start is set — looks like a leftover
            # debug override of the commented condition above; confirm.
            if True:
                if rank == 0:
                    logging.info('warm start ended lr %e', scheduler.get_lr()[0])
                    logging.info("=> loading checkpoint '{}'".format(args.save))
                # checkpoint = torch.load(os.path.join(args.save, 'model_best.pth.tar'))
                checkpoint = torch.load(os.path.join(args.save, 'model_best.pth.tar'),
                                        map_location=lambda storage, loc: storage)
                best_acc_top1 = checkpoint['best_acc_top1']
                model.load_state_dict(checkpoint['state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer'])
                scheduler.lr_const = True
                if rank == 0:
                    logging.info('return to last checkpoint')
                del checkpoint  # dereference seems crucial
                torch.cuda.empty_cache()
                # args.start_epoch = checkpoint['epoch']
                '''best_acc_top1 = checkpoint['best_acc_top1']
                model.load_state_dict(checkpoint['state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer'])
                scheduler.lr_const = True
                if rank == 0:
                    logging.info('return to last checkpoint')'''
        if rank == 0:
            utils.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
def main():
    """Train an ImageNet model from a searched genotype, with pickle-based
    resume (whole-network pickle plus optimizer/scheduler state dicts)."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # label-smoothed loss for training, plain cross-entropy for validation
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    def _build_optim(net):
        # Optimizer/scheduler pair bound to *net*'s parameters.
        opt = torch.optim.SGD(net.parameters(), args.learning_rate,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
        sched = torch.optim.lr_scheduler.StepLR(opt, args.decay_period, gamma=args.gamma)
        return opt, sched

    optimizer, scheduler = _build_optim(model)
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=8)
    valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=8)
    start_epoch = 0
    if args.resume:
        # NOTE: unpickling executes arbitrary code — only resume from trusted dirs.
        with open(os.path.join(args.resume_dir, 'network.pickle'), 'rb') as f:
            model = pickle.load(f)
        model.cuda()
        # BUG FIX: the optimizer/scheduler created above reference the
        # parameters of the now-discarded model object; rebuild them on the
        # resumed model BEFORE loading their saved state, otherwise training
        # would silently update tensors that are no longer in the model.
        optimizer, scheduler = _build_optim(model)
        checkpoint = torch.load(
            os.path.join(args.resume_dir, 'checkpoint.pth.tar'))
        start_epoch = checkpoint['epoch']
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        print('model was loaded. Start training from epoch {}.'.format(
            start_epoch))
    best_acc_top1 = 0
    for epoch in range(start_epoch, args.epochs):
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        with open(os.path.join(args.save, 'learning_rate.txt'), mode='a') as f:
            f.write(str(scheduler.get_lr()[0]) + '\n')
        # drop-path probability ramps linearly toward drop_path_prob
        if args.parallel:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('train_acc %f', train_acc)
        with open(os.path.join(args.save, "train_acc.txt"), mode='a') as f:
            f.write(str(train_acc) + '\n')
        with open(os.path.join(args.save, "train_loss.txt"), mode='a') as f:
            f.write(str(train_obj) + '\n')
        scheduler.step()
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        with open(os.path.join(args.save, "test_acc_1.txt"), mode='a') as f:
            f.write(str(valid_acc_top1) + '\n')
        with open(os.path.join(args.save, "test_acc_5.txt"), mode='a') as f:
            f.write(str(valid_acc_top5) + '\n')
        with open(os.path.join(args.save, "test_loss.txt"), mode='a') as f:
            f.write(str(valid_obj) + '\n')
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        # Consolidated save: only the state_dict source differs between the
        # DataParallel and single-GPU cases (the two near-identical branches
        # of the original were merged for consistency).
        net = model.module if args.parallel else model
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'model_state_dict': net.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
            }, is_best, args.save)
        # pickle the whole network so resume can restore architecture + weights
        with open(os.path.join(args.save, 'network.pickle'), 'wb') as f:
            pickle.dump(model, f)
def main():
    # Train a classifier on an 8-class ImageFolder dataset with non-ImageNet
    # channel statistics (train/ and test/ splits under args.data).
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model.drop_path_prob = args.drop_path_prob
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    #checkpoint = torch.load('./model_best.pth.tar')
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # NOTE(review): loss_weight is built but never passed to any loss below —
    # dead code, or was it meant for a weighted CrossEntropyLoss?
    loss_weight = np.array([3.04, 1, 3.45, 15.09, 4.51, 43.26, 32.45, 23.18])
    loss_weight = torch.from_numpy(loss_weight).float()
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # NOTE(review): criterion_smooth is created but training below uses the
    # plain criterion — confirm which loss is intended.
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    #model.load_state_dict(checkpoint['state_dict'])
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'test')
    # dataset-specific channel statistics (not the ImageNet defaults)
    normalize = transforms.Normalize(mean=[0.763, 0.545, 0.570], std=[0.141, 0.152, 0.169])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    num_classes = len(train_data.classes)
    # Inverse-frequency weights for the 8 classes; the weighted samplers built
    # from them are currently DISABLED (commented out of the loaders below).
    class_weight = np.array([
        1. / 213, 1. / 649, 1. / 188, 1. / 43, 1. / 144, 1. / 15, 1. / 20, 1.
        / 28
    ])
    class_weight = torch.from_numpy(class_weight)
    train_targets = [sample[1] for sample in train_data.imgs]
    valid_targets = [sample[1] for sample in valid_data.imgs]
    train_samples_weight = [
        class_weight[class_id] for class_id in train_targets
    ]
    train_sampler = torch.utils.data.sampler.WeightedRandomSampler(
        train_samples_weight, len(train_data))
    valid_samples_weight = [
        class_weight[class_id] for class_id in valid_targets
    ]
    valid_sampler = torch.utils.data.sampler.WeightedRandomSampler(
        valid_samples_weight, len(valid_data))
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=4)  #, sampler = train_sampler)
    # NOTE(review): the validation loader shuffles; harmless for accuracy but
    # unusual — confirm it was not meant to use valid_sampler instead.
    valid_queue = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=4)  #, sampler = valid_sampler)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # drop-path probability ramps linearly toward drop_path_prob; set on
        # the wrapper here even when DataParallel is used
        model.drop_path_prob = args.drop_path_prob * (epoch / args.epochs)
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        scheduler.step()
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
def main():
    # Distributed (apex DDP, one process per GPU) ImageNet training of a
    # searched architecture; rank 0 validates periodically and checkpoints.
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    num_gpus = torch.cuda.device_count()
    # local_rank selects this process's device
    args.gpu = args.local_rank % num_gpus
    torch.cuda.set_device(args.gpu)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    cudnn.deterministic = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    args.world_size = torch.distributed.get_world_size()
    # per-process batch size: global batch divided across workers
    args.batch_size = args.batch_size // args.world_size
    genotype = eval("genotypes.%s" % args.arch)
    logging.info('---------Genotype---------')
    logging.info(genotype)
    logging.info('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda(args.gpu)
    model = apex.parallel.DistributedDataParallel(model, delay_allreduce=True)
    # second, unwrapped copy used only for FLOPs profiling
    model_profile = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model_profile = model_profile.cuda(args.gpu)
    model_input_size_imagenet = (1, 3, 224, 224)
    model_profile.drop_path_prob = 0
    flops, _ = profile(model_profile, model_input_size_imagenet)
    logging.info("flops = %fMB, param size = %fMB", flops, count_parameters_in_MB(model))
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    # Prepare data
    # per_epoch_iters / val_iters / writer are defined elsewhere in this file
    total_iters = per_epoch_iters * args.epochs
    train_loader = get_train_dataloader(args.train_dir, args.batch_size, args.local_rank, total_iters)
    train_dataprovider = DataIterator(train_loader)
    val_loader = get_val_dataloader(args.test_dir)
    val_dataprovider = DataIterator(val_loader)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    start_epoch = 0
    best_acc_top1 = 0
    best_acc_top5 = 0
    checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_tar):
        logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
        # remap tensors saved from GPU 0 onto this process's GPU
        checkpoint = torch.load(
            checkpoint_tar,
            map_location={'cuda:0': 'cuda:{}'.format(args.local_rank)})
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint {} epoch = {}".format(
            checkpoint_tar, checkpoint['epoch']))
    # evaluation mode
    if args.eval:
        if args.eval_resume is not None:
            checkpoint = torch.load(args.eval_resume)
            model.module.drop_path_prob = 0
            model.load_state_dict(checkpoint['state_dict'])
            valid_acc_top1, valid_acc_top5 = infer(val_dataprovider, model.module, val_iters)
            print('valid_acc_top1: {}'.format(valid_acc_top1))
        # NOTE(review): nesting reconstructed from collapsed source — assumed
        # eval mode always exits, even without eval_resume; confirm.
        exit(0)
    for epoch in range(start_epoch, args.epochs):
        if args.lr_scheduler == 'cosine':
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            logging.info('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        # linear LR warm-up for large global batches
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch, current_lr * (epoch + 1) / 5.0)
        # drop-path probability ramps linearly toward drop_path_prob
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_dataprovider, model, criterion_smooth, optimizer, per_epoch_iters)
        writer.add_scalar('Train/Loss', train_obj, epoch)
        writer.add_scalar('Train/LR', current_lr, epoch)
        # only rank 0 validates, every 5th epoch or within 10 epochs of the end
        if args.local_rank == 0 and (epoch % 5 == 0 or args.epochs - epoch < 10):
            valid_acc_top1, valid_acc_top5 = infer(val_dataprovider, model.module, val_iters)
            # is_best is tracked but save_checkpoint_ below takes no such flag
            is_best = False
            if valid_acc_top5 > best_acc_top5:
                best_acc_top5 = valid_acc_top5
            if valid_acc_top1 > best_acc_top1:
                best_acc_top1 = valid_acc_top1
                is_best = True
            logging.info('Valid_acc_top1: %f', valid_acc_top1)
            logging.info('Valid_acc_top5: %f', valid_acc_top5)
            logging.info('best_acc_top1: %f', best_acc_top1)
            epoch_duration = time.time() - epoch_start
            logging.info('Epoch time: %ds.', epoch_duration)
            # NOTE(review): indentation reconstructed — assumed the timing log
            # and checkpoint save sit inside the rank-0 validation branch;
            # confirm against the original layout.
            save_checkpoint_(
                {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, args.save)
def main():
    """Train a discovered architecture on an ImageNet-layout ImageFolder dataset.

    The genotype is read from ``<genotype_path>/genotype.txt`` when that file
    exists (output of a previous search run), otherwise taken from
    ``genotypes.<args.arch>``. Trains with label smoothing and StepLR decay,
    checkpoints every epoch, and optionally launches the standalone test script.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed every RNG we rely on for best-effort reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Prefer a genotype serialized by a previous search stage; fall back to the
    # named entry in genotypes.py. NOTE(review): eval() here assumes the file is
    # trusted output of our own pipeline — never point it at external input.
    genotype_path = os.path.join(utils.get_dir(), args.genotype_path, 'genotype.txt')
    if os.path.isfile(genotype_path):
        with open(genotype_path, "r") as f:
            geno_raw = f.read()
        genotype = eval(geno_raw)
    else:
        genotype = eval("genotypes.%s" % args.arch)
    # Persist the genotype next to the checkpoints. Context manager replaces the
    # bare open()/close() pair so the handle is released even if write() raises.
    with open(os.path.join(args.save, 'genotype.txt'), "w") as f:
        f.write(str(genotype))

    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Plain CE for evaluation, label-smoothed CE for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    traindir = os.path.join(utils.get_dir(), args.data, 'train')
    validdir = os.path.join(utils.get_dir(), args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
        # drop_path_prob lives on the wrapped Network: under DataParallel the
        # attribute must be set on .module, otherwise the replicas never see it.
        drop_prob = args.drop_path_prob * epoch / args.epochs
        if args.parallel:
            model.module.drop_path_prob = drop_prob
        else:
            model.drop_path_prob = drop_prob

        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('train_acc %f', train_acc)
        # Step AFTER this epoch's optimizer updates (PyTorch >= 1.1 contract);
        # stepping at the top of the loop would skip the initial learning rate.
        scheduler.step()

        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)

    if args.test:
        torch.cuda.empty_cache()
        # Hand off to the standalone evaluation script on the saved weights.
        os.system(
            'python src/test_imagenet.py --batch_size 8 --auxiliary --model_path %s '
            % os.path.join(args.save, 'weights.pt'))
def main():
    """Train a discovered architecture on an ImageFolder dataset with
    train/valid subdirectories.

    Builds the network from ``genotypes.<args.arch>``, trains with label
    smoothing and StepLR decay, and checkpoints the best top-1 model each epoch.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed every RNG we rely on for best-effort reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Plain CE for evaluation, label-smoothed CE for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'valid')
    # NOTE(review): these stats look like Tiny-ImageNet channel statistics —
    # confirm they match the dataset pointed to by args.data.
    normalize = transforms.Normalize(mean=[0.4802, 0.4481, 0.3975],
                                     std=[0.2302, 0.2265, 0.2262])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomRotation(20),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # get_last_lr() replaces the deprecated get_lr(), which returns warped
        # values when called mid-schedule.
        logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
        # drop_path_prob lives on the wrapped Network: under DataParallel the
        # attribute must be set on .module, otherwise the replicas never see it.
        drop_prob = args.drop_path_prob * epoch / args.epochs
        if args.parallel:
            model.module.drop_path_prob = drop_prob
        else:
            model.drop_path_prob = drop_prob

        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('train_acc %f', train_acc)
        # Step AFTER this epoch's optimizer updates (PyTorch >= 1.1 contract);
        # stepping at the top of the loop would skip the initial learning rate.
        scheduler.step()

        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
def main():
    """Config-driven ImageNet training of a discovered architecture.

    Reads hyper-parameters from the global ``config`` object, measures params
    and mult-adds of the model, then trains via the project ``Trainer`` with
    either a cosine or a warmup+linear LR schedule, checkpointing every epoch.
    """
    # Seed all RNGs for best-effort reproducibility; cudnn.benchmark trades
    # determinism for autotuned kernels.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, NUM_CLASSES, args.layers,
                    config.optim.auxiliary, genotype)
    start_epoch = 0
    # Disable drop-path and switch to eval mode while measuring cost, so the
    # measurement reflects the deterministic inference graph.
    model.eval()
    model.drop_path_prob = args.drop_path_prob * 0
    # compute the params as well as the multi-adds
    params = count_parameters_in_MB(model)
    logging.info("Params = %.2fMB" % params)
    mult_adds = comp_multadds(model, input_size=config.data.input_size)
    logging.info("Mult-Adds = %.2fMB" % mult_adds)
    model.train()
    # Wrap in DataParallel only when more than one GPU was requested.
    if len(args.gpus) > 1:
        model = nn.DataParallel(model)
    model = model.cuda()
    if config.optim.label_smooth:
        criterion = CrossEntropyLabelSmooth(NUM_CLASSES, config.optim.smooth_alpha)
    else:
        criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), config.optim.init_lr,
                                momentum=config.optim.momentum,
                                weight_decay=config.optim.weight_decay)
    # Project-specific ImageNet loader wrapper; augmentation type and image
    # sizes come from the data config.
    imagenet = imagenet_data.ImageNet12(
        trainFolder=os.path.join(args.data_path, 'train'),
        testFolder=os.path.join(args.data_path, 'val'),
        num_workers=config.data.num_workers,
        type_of_data_augmentation=config.data.type_of_data_aug,
        data_config=config.data,
        size_images=config.data.input_size[1],
        scaled_size=config.data.scaled_size[1])
    train_queue, valid_queue = imagenet.getTrainTestLoader(
        config.data.batch_size)
    # Scheduler object only exists for the cosine schedule; the linear branch
    # adjusts the LR manually via adjust_lr each epoch.
    if config.optim.lr_schedule == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(config.train_params.epochs))
    trainer = Trainer(train_queue, valid_queue, criterion, config, args.report_freq)
    best_epoch = [0, 0, 0]  # [epoch, acc_top1, acc_top5]
    lr = config.optim.init_lr
    for epoch in range(start_epoch, config.train_params.epochs):
        if config.optim.lr_schedule == 'cosine':
            # NOTE(review): step() at the top of the loop advances the schedule
            # before the first optimizer update — kept as-is (legacy pattern).
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif config.optim.lr_schedule == 'linear':  # with warmup initial
            optimizer, current_lr = adjust_lr(
                optimizer, config.train_params.epochs, lr, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        if epoch < 5:  # Warmup epochs for 5
            # Linear warmup overrides whatever the schedule chose above.
            current_lr = lr * (epoch + 1) / 5.0
            for param_group in optimizer.param_groups:
                param_group['lr'] = current_lr
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         lr * (epoch + 1) / 5.0)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        # drop_path_prob is ramped linearly over training; with DataParallel it
        # must be set on the wrapped module.
        if len(args.gpus) > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / config.train_params.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / config.train_params.epochs
        train_acc_top1, train_acc_top5, train_obj, batch_time, data_time = trainer.train(
            model, optimizer, epoch)
        with torch.no_grad():
            val_acc_top1, val_acc_top5, batch_time, data_time = trainer.infer(
                model, epoch)
        if val_acc_top1 > best_epoch[1]:
            best_epoch = [epoch, val_acc_top1, val_acc_top5]
            if epoch >= 0:  # 120
                # NOTE(review): saves model.module.state_dict() unconditionally —
                # presumably assumes the multi-GPU (DataParallel) path; verify
                # for single-GPU runs.
                utils.save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'state_dict': model.module.state_dict(),
                        'best_acc_top1': val_acc_top1,
                        'optimizer': optimizer.state_dict(),
                    }, save_path=args.save, epoch=epoch, is_best=True)
            # Additionally dump raw weights tagged with epoch and accuracy.
            if len(args.gpus) > 1:
                utils.save(
                    model.module.state_dict(),
                    os.path.join(
                        args.save,
                        'weights_{}_{}.pt'.format(epoch, val_acc_top1)))
            else:
                utils.save(
                    model.state_dict(),
                    os.path.join(
                        args.save,
                        'weights_{}_{}.pt'.format(epoch, val_acc_top1)))
        logging.info('BEST EPOCH %d  val_top1 %.2f val_top5 %.2f', best_epoch[0],
                     best_epoch[1], best_epoch[2])
        logging.info(
            'epoch: {} \t train_acc_top1: {:.4f} \t train_loss: {:.4f} \t val_acc_top1: {:.4f}'
            .format(epoch, train_acc_top1, train_obj, val_acc_top1))
    logging.info("Params = %.2fMB" % params)
    logging.info("Mult-Adds = %.2fMB" % mult_adds)
def main():
    """Train an architecture given as a literal genotype string, with a
    FLOPs/params profile (thop) before training.

    Standard ImageNet pipeline: label-smoothed training loss, StepLR decay,
    Cutout augmentation, per-epoch best-top1 checkpointing.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed every RNG we rely on for best-effort reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # args.arch is expected to be a genotype expression, not a name in
    # genotypes.py. NOTE(review): eval() assumes a trusted command line.
    args.arch = eval(args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, args.arch)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    from thop import profile
    # The dummy input must live on the same device as the model, and we avoid
    # shadowing the builtin name `input`.
    dummy_input = torch.randn(1, 3, 224, 224).cuda()
    flops, params = profile(model, inputs=(dummy_input,))
    total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(flops, params)
    print(total_params)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Plain CE for evaluation, label-smoothed CE for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            normalize,
            Cutout(20),
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # get_last_lr() replaces the deprecated get_lr().
        logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
        # drop_path_prob lives on the wrapped Network: under DataParallel the
        # attribute must be set on .module, otherwise the replicas never see it.
        drop_prob = args.drop_path_prob * epoch / args.epochs
        if args.parallel:
            model.module.drop_path_prob = drop_prob
        else:
            model.drop_path_prob = drop_prob

        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('train_acc %f', train_acc)
        # Step AFTER this epoch's optimizer updates (PyTorch >= 1.1 contract);
        # stepping at the top of the loop would skip the initial learning rate.
        scheduler.step()

        valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)
def main():
    """Distributed (DDP) ImageNet training of a searched architecture.

    Scales the LR with global batch size, optionally derives the channel count
    to stay under a mult-adds budget, and trains with a warmup-gated LR
    schedule. Rank 0 writes TensorBoard scalars and checkpoints.
    """
    # Scale learning rate based on global batch size.
    if not args.no_scale_lr:
        scale = float(args.batch_size * args.world_size) / 128.0
        args.learning_rate = scale * args.learning_rate

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed all RNGs; cudnn.benchmark trades determinism for autotuned kernels.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info('args = %s', args)

    # Get data loaders.
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4,
                               contrast=0.4,
                               saturation=0.4,
                               hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    # LMDB-backed dataset when the data path indicates it, else plain folders.
    if 'lmdb' in args.data:
        train_data = imagenet_lmdb_dataset(traindir, transform=train_transform)
        valid_data = imagenet_lmdb_dataset(validdir, transform=val_transform)
    else:
        train_data = dset.ImageFolder(traindir, transform=train_transform)
        valid_data = dset.ImageFolder(validdir, transform=val_transform)
    # DistributedSampler shards the training set per rank; shuffle must stay
    # False because the sampler owns the shuffling.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=False, pin_memory=True,
        num_workers=8, sampler=train_sampler)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True,
        num_workers=8)

    # Set up the network.
    # args.genotype may be a path to a serialized genotype or a name in
    # genotypes.py; internally we always handle a list of genotypes.
    if os.path.isfile(args.genotype):
        logging.info('Loading genotype from: %s' % args.genotype)
        genotype = torch.load(args.genotype, map_location='cpu')
    else:
        genotype = eval('genotypes.%s' % args.genotype)
    if not isinstance(genotype, list):
        genotype = [genotype]

    # If num channels not provided, find the max under 600M MAdds.
    if args.init_channels < 0:
        # Only rank 0 runs the search; the result is broadcast to the other
        # ranks via the all-reduce below.
        if args.local_rank == 0:
            flops, num_params, init_channels = find_max_channels(
                genotype, args.layers, args.cell_type, args.max_M_flops * 1e6)
            logging.info('Num flops = %.2fM', flops / 1e6)
            logging.info('Num params = %.2fM', num_params / 1e6)
        else:
            init_channels = 0
        # All reduce with world_size 1 is sum.
        init_channels = torch.Tensor([init_channels]).cuda()
        init_channels = utils.reduce_tensor(init_channels, 1)
        args.init_channels = int(init_channels.item())
    logging.info('Num channels = %d', args.init_channels)

    # Create model and loss.
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype, args.cell_type)
    model = model.cuda()
    model = DDP(model, delay_allreduce=True)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()
    logging.info('param size = %fM', utils.count_parameters_in_M(model))

    # Set up network weights optimizer.
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.lr_scheduler == 'exp':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    args.decay_period,
                                                    gamma=args.gamma)
    elif args.lr_scheduler == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs), eta_min=args.min_learning_rate)

    # Train.
    global_step = 0
    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # Shuffle the sampler, update lrs.
        # Seeding with epoch+seed keeps shards identical across ranks but
        # reshuffled every epoch.
        train_queue.sampler.set_epoch(epoch + args.seed)
        # Change lr.
        # The scheduler is held back during warmup; presumably train() applies
        # the warmup LR itself (it receives learning_rate and warmup_epochs).
        if epoch >= args.warmup_epochs:
            scheduler.step()
        # DDP wraps the Network, so drop_path_prob must go through .module.
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        # Training.
        train_acc, train_obj, global_step = train(train_queue, model,
                                                  criterion_smooth, optimizer,
                                                  epoch, args.learning_rate,
                                                  args.warmup_epochs,
                                                  global_step)
        logging.info('train_acc %f', train_acc)
        writer.add_scalar('train/acc', train_acc, global_step)
        # Validation.
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        writer.add_scalar('val/acc_top1', valid_acc_top1, global_step)
        writer.add_scalar('val/acc_top5', valid_acc_top5, global_step)
        writer.add_scalar('val/loss', valid_obj, global_step)
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        # Only rank 0 writes checkpoints to avoid concurrent writes.
        if args.local_rank == 0:
            utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc_top1': best_acc_top1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.save)
def main():
    """Multi-GPU ImageNet training of a named genotype.

    Auto-detects GPU count for DataParallel, supports cosine or linear LR
    schedules with a 5-epoch warmup for large batches, and checkpoints every
    epoch, tracking best top-1 and top-5 accuracy.
    """
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    # Seed all RNGs; cudnn.benchmark trades determinism for autotuned kernels.
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed_args = %s", unparsed)
    num_gpus = torch.cuda.device_count()

    genotype = eval("genotypes.%s" % args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary,
                    genotype)
    # DataParallel only when more than one GPU is visible.
    if num_gpus > 1:
        model = nn.DataParallel(model)
        model = model.cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Plain CE for evaluation, label-smoothed CE for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    data_dir = os.path.join(args.tmp_data_dir, 'imagenet')
    traindir = os.path.join(data_dir, 'train')
    validdir = os.path.join(data_dir, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=args.workers)
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
    # Cosine scheduler is created unconditionally but only stepped when
    # args.lr_scheduler == 'cosine'; the 'linear' branch uses adjust_lr instead.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    best_acc_top1 = 0
    best_acc_top5 = 0
    lr = args.learning_rate
    for epoch in range(args.epochs):
        if args.lr_scheduler == 'cosine':
            # NOTE(review): step() at the top of the loop advances the schedule
            # before the first optimizer update — kept as-is (legacy pattern).
            scheduler.step()
            current_lr = scheduler.get_lr()[0]
        elif args.lr_scheduler == 'linear':
            current_lr = adjust_lr(optimizer, epoch)
        else:
            print('Wrong lr type, exit')
            sys.exit(1)
        logging.info('Epoch: %d lr %e', epoch, current_lr)
        # Linear warmup over the first 5 epochs, only for large global batches;
        # it overrides the scheduler-chosen LR directly in the param groups.
        if epoch < 5 and args.batch_size > 256:
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr * (epoch + 1) / 5.0
            logging.info('Warming-up Epoch: %d, LR: %e', epoch,
                         lr * (epoch + 1) / 5.0)
        # drop_path_prob is ramped linearly; under DataParallel it must be set
        # on the wrapped module.
        if num_gpus > 1:
            model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        else:
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        epoch_start = time.time()
        train_acc, train_obj = train(train_queue, model, criterion_smooth,
                                     optimizer)
        logging.info('Train_acc: %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        logging.info('Valid_acc_top1: %f', valid_acc_top1)
        logging.info('Valid_acc_top5: %f', valid_acc_top5)
        epoch_duration = time.time() - epoch_start
        logging.info('Epoch time: %ds.', epoch_duration)
        # Track best top-5 and top-5-independent best top-1; only a new best
        # top-1 marks the checkpoint as "best".
        is_best = False
        if valid_acc_top5 > best_acc_top5:
            best_acc_top5 = valid_acc_top5
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc_top1': best_acc_top1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)
def main():
    """Train a discovered architecture on the dog_images dataset.

    Standard pipeline: label-smoothed training loss, StepLR decay, per-epoch
    validation whose logits are pickled to disk, and best-top1 checkpointing.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed every RNG we rely on for best-effort reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    if args.parallel:
        model = nn.DataParallel(model).cuda()
    else:
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Plain CE for evaluation, label-smoothed CE for training.
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    # Dataset layout: <data_dir>/{train,valid,test}; the test split is handled
    # elsewhere and not loaded here.
    data_dir = '../../data/dog_images'
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'

    # Image Transformation — standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_data = dset.ImageFolder(
        train_dir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
            transforms.ToTensor(),
            normalize,
        ]))
    valid_data = dset.ImageFolder(
        valid_dir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)

    best_acc_top1 = 0
    for epoch in range(args.epochs):
        # get_last_lr() replaces the deprecated get_lr().
        logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
        # drop_path_prob lives on the wrapped Network: under DataParallel the
        # attribute must be set on .module, otherwise the replicas never see it.
        drop_prob = args.drop_path_prob * epoch / args.epochs
        if args.parallel:
            model.module.drop_path_prob = drop_prob
        else:
            model.drop_path_prob = drop_prob

        train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
        logging.info('train_acc %f', train_acc)
        # Step AFTER this epoch's optimizer updates (PyTorch >= 1.1 contract);
        # stepping at the top of the loop would skip the initial learning rate.
        scheduler.step()

        logits_all, valid_acc_top1, valid_acc_top5, valid_obj = infer(
            valid_queue, model, criterion)
        # Dump the latest validation logits (overwritten each epoch). Context
        # manager closes the handle; the original leaked it via a bare open().
        with open("logits_.p", "wb") as f:
            pickle.dump(logits_all, f)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc_top1': best_acc_top1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)