def main():
    """Evaluate a pretrained architecture on held-out CIFAR data.

    Builds the selected network from its genotype, loads pretrained
    weights (CIFAR-100 checkpoints use a DataParallel state dict under
    the 'net' key), then runs inference and prints average accuracy.
    """
    if not torch.cuda.is_available():
        sys.exit(1)
    ## step 1 construct the selected network
    # getattr performs the same attribute lookup as the original
    # eval("genotypes.%s" % ...) without evaluating a CLI string.
    genotype = getattr(genotypes, args.selected_arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    ## step 2 load pretrained model parameter
    if args.cifar100:
        # CIFAR-100 checkpoint was saved from a DataParallel wrapper,
        # so wrap first and load the 'net' entry of the checkpoint dict.
        model = torch.nn.DataParallel(model)
        model = model.cuda()
        model.load_state_dict(torch.load(args.model_path)['net'])
    else:
        utils.load(model, args.model_path)
        model = torch.nn.DataParallel(model)
        model = model.cuda()
    # Disable drop-path at inference; set on both wrapper and inner module.
    model.module.drop_path_prob = 0
    model.drop_path_prob = 0
    print("param size = %fMB" % utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    ## step 3 load test data
    valid_queue = load_data_cifar(args)
    ## step 4. inference on test data
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    print('-----------------------------------------------')
    print('Average Valid_acc: %f ' % valid_acc)
    print('-----------------------------------------------')
def main():
    """Evaluate a trained genotype on the CIFAR-10 test set."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed every RNG source so evaluation is reproducible.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Look up the architecture genotype by name in the genotypes module.
    genotype = genotypes.__dict__[args.arch]
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True,
                             transform=test_transform)
    test_queue = DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    # NOTE(review): drop-path is left at the training value at test time —
    # confirm this is intended (other scripts here use 0 for evaluation).
    model.drop_path_prob = args.drop_path_prob
    test_acc, top5_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Evaluate a binarized network on the CIFAR-10 test split."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    cudnn.benchmark = True
    cudnn.enabled = True
    logging.info("args = %s", args)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # NOTE(review): eval() on a CLI-provided name; getattr would be safer.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, genotype)
    if args.parallel:
        model = MyDataParallel(model).cuda()
    else:
        model = model.cuda()
    # Weight-binarization helper; passed through to infer() below.
    bin_op = bin_utils.BinOp(model, args)
    _, valid_transform = utils._data_transforms_cifar10(args)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True,
                              transform=valid_transform)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    utils.load(model, args.path_to_weights)
    # Match the drop-path probability of the final training epoch; the
    # attribute lives on .module when wrapped in (My)DataParallel.
    if args.parallel:
        model.module.drop_path_prob = args.drop_path_prob * (args.epochs - 1) / args.epochs
    else:
        model.drop_path_prob = args.drop_path_prob * (args.epochs - 1) / args.epochs
    valid_acc, valid_obj = infer(valid_queue, model, criterion, bin_op)
    logging.info('valid_acc %f', valid_acc)
def main():
    """Evaluate a trained genotype on CIFAR-10 with drop-path disabled."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    torch.cuda.set_device(args.gpu)
    cudnn.enabled = True
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    # Fix: narrowed the original bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit. The fallback assumes the load failed
    # because the checkpoint was saved from a wrapped model exposing
    # `.module` — NOTE(review): confirm Network actually has `.module`.
    try:
        utils.load(model, args.model_path)
    except Exception:
        model = model.module
        utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True,
                             transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=False, num_workers=2)
    # Stochastic drop-path is switched off for deterministic evaluation.
    model.drop_path_prob = 0.0
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('Test_acc %f', test_acc)
def main():
    """Evaluate a trained genotype on CIFAR-10 (multi-GPU-aware setup)."""
    np.random.seed(args.seed)
    # args.gpu is a comma-separated list, e.g. "0" or "0,1".
    gpus = [int(i) for i in args.gpu.split(',')]
    if len(gpus) == 1:
        torch.cuda.set_device(int(args.gpu))
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model.cuda()
    if len(gpus) > 1:
        print("True")
        model = nn.parallel.DataParallel(model, device_ids=gpus,
                                         output_device=gpus[0])
        # NOTE(review): unwrapping immediately after wrapping makes the
        # DataParallel wrapper a no-op — confirm this is intended.
        model = model.module
    utils.load(model, args.model_path)
    print("If the model is running on GPU:", next(model.parameters()).is_cuda)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True,
                             transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Train a fixed genotype from scratch on CIFAR-10 or CIFAR-100."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Reproducible single-GPU setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    # NOTE(review): CIFAR_CLASSES is used even when args.set == 'cifar100' —
    # confirm it is set to 100 for that case.
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    # Dataset selection; CIFAR-10 is the default.
    if args.set == 'cifar100':
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        # NOTE(review): stepping the scheduler before train() follows the
        # pre-1.1 PyTorch convention (original DARTS code does the same).
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp the drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        # Checkpoint after every epoch (overwrites the previous file).
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train a denoising network (MSE objective) with an index-based
    train/validation split drawn from a single dataset.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Reproducible single-GPU setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    # The network is parameterized with the output image size.
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype,
                    output_height=args.img_cropped_height,
                    output_width=args.img_cropped_width)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Pixel-wise regression objective for denoising.
    criterion = nn.MSELoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    train_transform, valid_transform = utils._data_trainsforms_denosining_dataset(args)
    train_data = DENOISE_DATASET(root=args.data, train_folder=args.train_data,
                                 label_folder=args.label_data, train=True,
                                 transform=train_transform,
                                 target_transform=train_transform)
    # Split one dataset into train/validation by index (args.train_portion).
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_obj %f', train_obj)
        valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_obj %f', valid_obj)
        # Fix: dropped the redundant './' so the checkpoint path is
        # '<save>/weights.pt' instead of '<save>/./weights.pt'.
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Evaluate a trained model on the CIFAR-10.1 v4 test set loaded from
    numpy arrays.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    in_channels = 3
    num_classes = 10
    stride_for_aux = 3
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, in_channels, stride_for_aux,
                    num_classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    # CIFAR-10.1 v4 arrays from hard-coded machine-specific paths.
    data_10 = np.load(
        "/home/sivan/darts/cnn/data_cifar_10_1/cifar10.1_v4_data.npy")
    labels_10 = np.load(
        '/home/sivan/darts/cnn/data_cifar_10_1/cifar10.1_v4_labels.npy')
    # Fix: this line was commented out, but tensor_x is used below,
    # which raised a NameError at runtime.
    tensor_x = torch.stack([torch.Tensor(i) for i in data_10])  # transform to torch tensors
    tensor_y = torch.stack([torch.Tensor(i) for i in labels_10])
    test_data = utils_d.TensorDataset(tensor_x, tensor_y)  # create your datset
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Evaluate a trained network on the configured dataset's test split.

    Resolves the genotype (named attribute first, file fallback), rebuilds
    the network, restores the trained weights, runs inference, and writes
    the resulting score to ``test.txt`` next to the checkpoint.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Deterministic setup on the requested GPU.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    try:
        genotype = eval("genotypes.%s" % args.arch)
    except (AttributeError, SyntaxError):
        genotype = genotypes.load_genotype_from_file(args.arch)
    test_data, OUTPUT_DIM, IN_CHANNELS, is_regression = load_dataset(
        args, train=False)
    net = Network(args.init_channels, OUTPUT_DIM, args.layers,
                  args.auxiliary, genotype, num_channels=IN_CHANNELS)
    net = net.cuda()
    utils.load(net, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(net))
    # MSE for regression tasks, cross-entropy otherwise.
    loss_fn = (nn.MSELoss() if is_regression else nn.CrossEntropyLoss()).cuda()
    loader = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    net.drop_path_prob = args.drop_path_prob
    score, loss_value = infer(loader, net, loss_fn,
                              is_regression=is_regression)
    logging.info('test_acc (R^2 for regression) %f', score)
    # Persist the final score next to the model weights.
    ckpt_dir = os.path.dirname(args.model_path)
    with open(os.path.join(ckpt_dir, "test.txt"), "w") as out_file:
        out_file.write(str(score))
def main():
    """Evaluate a trained denoising network; reports loss-based score and PSNR."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    # Network is parameterized with the reconstruction output size.
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype,
                    output_height=args.img_cropped_height,
                    output_width=args.img_cropped_width)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # Pixel-wise regression objective.
    criterion = nn.MSELoss()
    criterion = criterion.cuda()
    test_transform, test_valid_transform = utils._data_trainsforms_denosining_dataset(
        args)
    # NOTE(review): train=True is passed to the *test* dataset here —
    # confirm DENOISE_DATASET_TEST expects that.
    test_data = DENOISE_DATASET_TEST(root=args.data,
                                     train_folder=args.train_data,
                                     label_folder=args.label_data, train=True,
                                     transform=test_transform,
                                     target_transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    test_acc, psnr = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
    logging.info('Final psnr_acc %f', psnr)
def main():
    """Evaluate a trained genotype on the K49 (Kuzushiji-49) test set."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Reproducible single-GPU setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Rebuild the network from its genotype and restore trained weights.
    arch_genotype = eval("genotypes.%s" % args.arch)
    net = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                  args.auxiliary, arch_genotype)
    net = net.cuda()
    utils.load(net, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(net))
    loss_fn = nn.CrossEntropyLoss().cuda()
    # K49 test split; path is relative to the working directory.
    kmnist_root = '../data/kmnist/'
    to_tensor = transforms.ToTensor()
    eval_set = K49(kmnist_root, False, to_tensor)
    eval_loader = torch.utils.data.DataLoader(
        eval_set, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    net.drop_path_prob = args.drop_path_prob
    acc, avg_loss = infer(eval_loader, net, loss_fn)
    logging.info('test_acc %f', acc)
def main():
    """Evaluate a trained model on a custom image-classification test set."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    print('genotype')
    genotype = eval("genotypes.%s" % args.arch)
    print('network')
    model = Network(args.init_channels, args.n_class, args.layers,
                    args.auxiliary, genotype)
    print('cuda')
    model = model.cuda()
    print('load')
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Fix: the test split was unpacked into `test_dat` while the DataLoader
    # below referenced `test_data`, raising a NameError.
    _, _, _, _, _, test_data = utils2.get_data(
        "custom", args.data, args.data, args.data, cutout_length=0,
        validation=True, validation2=True, n_class=args.n_class,
        image_size=args.image_size)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    # Fix: removed the leftover `a = 2/0` debug line, which crashed the
    # script with ZeroDivisionError before inference could run.
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Evaluate a trained genotype on a Kinetics-style video test list."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, ntu_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # NOTE(review): hard-coded machine-specific test-list path — should be
    # a CLI argument.
    validset = dataset.MyDataset('/media/lab540/79eff75a-f78c-42f2-8902-9358e88bf654/lab540/Neura_auto_search/datasets/kinetics_convert/test.txt',
                                 transform=transform.ToTensor())
    valid_queue = torch.utils.data.DataLoader(
        validset, batch_size=args.batch_size, shuffle=False, num_workers=1)
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(valid_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Train a fixed genotype on CIFAR-10 end to end, checkpointing each epoch."""
    # Setup
    args = cmd_argument_parser()
    create_logger(args.save)
    # Set the gpu device to be used
    # NOTE: Only operates on a single GPU
    if torch.cuda.is_available():
        torch.cuda.set_device(int(args.gpu))
    else:
        logging.info('no gpu device available')
        sys.exit(1)
    # Ensure seeds are set
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Hardware specific tuning
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # Get the specific architecture to train
    genotype = genomes[args.arch]
    # Create the fixed network
    # Note: This differs from the Network used in model_search.py
    # TODO: Update the Network class
    model = Network(C=args.init_channels,
                    num_classes=CIFAR_CLASSES,
                    layers=args.layers,
                    auxiliary=args.auxiliary,
                    genotype=genotype)
    model = model.cuda()
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # The loss function
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Optimizer used to adjust the models parameters as well as an optimizer
    # of the learning rate
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    lr_scheduler = CosineAnnealingLR(optimizer=optimizer,
                                     T_max=float(args.epochs))
    # Get the transforms for both the train and validation data
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    # Get the data from torchvision's datasets
    train_data = CIFAR10(root=args.data, train=True, download=True,
                         transform=train_transform)
    valid_data = CIFAR10(root=args.data, train=False, download=True,
                         transform=valid_transform)
    # Create Dataloaders for both
    train_queue = DataLoader(train_data, batch_size=args.batch_size,
                             shuffle=True, pin_memory=True, num_workers=0)
    valid_queue = DataLoader(valid_data, batch_size=args.batch_size,
                             shuffle=False, pin_memory=True, num_workers=0)
    for epoch in range(args.epochs):
        logging.info(f'epoch = {epoch}')
        logging.info(f'lr = {lr_scheduler.get_last_lr()}')
        # More likely to drop a path as epochs progress
        model.drop_path_prob = args.drop_path_prob * (epoch / args.epochs)
        train_acc, train_obj = train(train_queue, model, criterion,
                                     optimizer, args)
        # Validation never needs gradients.
        with torch.no_grad():
            valid_acc, valid_obj = infer(valid_queue, model, criterion, args)
        logging.info(f'train_acc = {train_acc}')
        logging.info(f'valid_acc = {valid_acc}')
        # Save the model for each epoch
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        lr_scheduler.step()
def main():
    """Train (optionally resuming) a fixed genotype on CIFAR-10/100,
    keeping the best checkpoint by validation accuracy.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    # torch.cuda.set_device(args.gpu)
    device = torch.device("cuda")
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # read data
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.dataset == 'cifar10':
        # NOTE(review): args.data is overwritten with a machine-specific path.
        args.data = '/home/work/dataset/cifar'
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
        classes = 10
    if args.dataset == 'cifar100':
        args.data = '/home/work/dataset/cifar100'
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
        classes = 100
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    # model
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    model.drop_path_prob = args.drop_path_prob
    # FLOPs/params profiling on a dummy 32x32 CIFAR input.
    flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32).cuda(),), verbose=False)
    logging.info('flops = %fM', flops / 1e6)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_val_acc = 0.
    if args.resume:
        # NOTE(review): hard-coded checkpoint path of one specific run;
        # other runs' paths were kept commented out in the original.
        state = torch.load('/home/work/lixudong/code_work/sgas/cnn/full_train_s3_2-20200608/weights.pt', map_location='cpu')
        model.load_state_dict(state)
        model = model.to(device)
        # Fast-forward the LR schedule to the resume epoch.
        for i in range(args.start_epoch):
            scheduler.step()
        best_val_acc = 97.19  # NOTE(review): hard-coded best acc of the resumed run
    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp the drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        with torch.no_grad():
            valid_acc, valid_obj = infer(valid_queue, model, criterion)
        # Keep a separate checkpoint of the best-so-far model.
        if valid_acc > best_val_acc:
            best_val_acc = valid_acc
            utils.save(model, os.path.join(args.save, 'best_weights.pt'))
        logging.info('val_acc: {:.6}, best_val_acc: \033[31m{:.6}\033[0m'.format(valid_acc, best_val_acc))
        # Full resumable state (model + optimizer) saved every epoch.
        state = {
            'epoch': epoch,
            'model_state': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_val_acc': best_val_acc
        }
        torch.save(state, os.path.join(args.save, 'weights.pt.tar'))
def main(args):
    """Evaluate an ensemble of trained models found under args.models_folder.

    Each 'eval*' subdirectory with a weights.pt file and a matching
    genotype in genotypes.py contributes one model. Optionally computes
    per-model (or per-class) ensemble weights on a validation split, then
    reports top-1/top-5 accuracy on the test and train sets.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    models_folder = args.models_folder
    if not os.path.isdir(models_folder):
        logging.error("The models_folder argument %s is not a directory", models_folder)
        sys.exit(1)
    models = dict()
    # Fix: renamed loop variable `dir`, which shadowed the builtin dir().
    for entry in os.listdir(models_folder):
        if entry.startswith("eval"):
            weights_file = os.path.join(models_folder, entry, "weights.pt")
            if os.path.exists(weights_file):
                # Directory names look like 'eval-<arch>-...'.
                arch = entry.split("-")[1]
                genotype = genotypes.__dict__.get(arch, None)
                if genotype is not None:
                    model = Network(args.init_channels, CIFAR_CLASSES,
                                    args.layers, args.auxiliary, genotype)
                    model = model.cuda()
                    utils.load(model, weights_file)
                    model.drop_path_prob = args.drop_path_prob
                    model.eval()
                    models[arch] = model
                    logging.info("%s param size = %fMB", entry,
                                 utils.count_parameters_in_MB(model))
                else:
                    logging.info("Ignoring %s because there is no genotype %s on genotype.py", entry, arch)
    if len(models) == 0:
        logging.error("No model was found on %s", models_folder)
        sys.exit(1)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    train_transform, test_transform = utils._data_transforms_cifar10(args)
    if args.set == 'cifar100':
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=test_transform)
        test_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=test_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=test_transform)
        test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    # Validation reuses the training images (different transform), split by index.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))
    train_queue = DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=4)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=4)
    test_queue = DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=4)
    if args.calculate:
        # return n_models x n_classes matrix of weights
        if args.per_class:
            weights = calc_ensemble(valid_queue, models, len(valid_data.classes))
        else:
            weights = calc_ensemble(valid_queue, models)
    else:
        # Unweighted ensemble: every model counts equally.
        weights = torch.ones(len(models), device='cuda')
    logging.info('train final weights = %s', weights)
    # scale weights per maximum value per class
    test_acc, top5_acc, test_obj = infer(test_queue, models, criterion,
                                         weights / weights.amax(dim=0))
    logging.info('test loss %e, acc top1: %.2f, acc top5 %.2f', test_obj, test_acc, top5_acc)
    train_acc, top5_acc, train_obj = infer(train_queue, models, criterion)
    logging.info('train loss %e, acc top1: %f, acc top5 %f', train_obj, train_acc, top5_acc)
config = nni.get_next_parameter() config = {"lambda_student": 0.5, "T_student": 5, "seed": 20} torch.manual_seed(config['seed']) torch.cuda.manual_seed(config['seed']) # trial_id = os.environ.get('NNI_TRIAL_JOB_ID') trial_id = 'combine_mode' dataset = args.dataset num_classes = 100 if dataset == 'cifar100' else 'cifar10' teacher_model = None print(args.TA) if args.TA == 'DARTS': genotype = eval("genotypes.%s" % args.arch) TA_model = Network(36, 10, layer, True, genotype) TA_model.cuda() # utils.load(student_model, 'cifar10_model.pt') TA_model.drop_path_prob = 0.2 else: TA_model = create_cnn_model(args.TA, dataset, use_cuda=args.cuda) student_model = create_cnn_model(args.student, dataset, use_cuda=args.cuda) train_config = { 'epochs': args.epochs, 'learning_rate': args.learning_rate, 'momentum': args.momentum, 'weight_decay': args.weight_decay, 'device': 'cuda' if args.cuda else 'cpu', 'is_plane': not is_resnet(args.student), 'trial_id': trial_id,
def main():
    """Train a fixed genotype on CIFAR-10, logging per-epoch metrics to
    TensorBoard via SummaryWriter.
    """
    writerTf = SummaryWriter(comment='writerTf')
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    # args.gpu is a comma-separated list, e.g. "0" or "0,1".
    gpus = [int(i) for i in args.gpu.split(',')]
    if len(gpus) == 1:
        torch.cuda.set_device(int(args.gpu))
    # torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    # cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    if len(gpus) > 1:
        print("True")
        model = nn.parallel.DataParallel(model, device_ids=gpus,
                                         output_device=gpus[0])
        # NOTE(review): unwrapping immediately after wrapping makes the
        # DataParallel wrapper a no-op — confirm this is intended.
        model = model.module
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
                                              shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size,
                                              shuffle=False, pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        # NOTE(review): pre-1.1 PyTorch convention (step before train).
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp the drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        with torch.no_grad():
            valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        # Per-epoch TensorBoard scalars.
        writerTf.add_scalar('train_acc', train_acc, epoch)
        writerTf.add_scalar('train_obj', train_obj, epoch)
        writerTf.add_scalar('valid_acc', valid_acc, epoch)
        writerTf.add_scalar('valid_obj', valid_obj, epoch)
    writerTf.close()
def process_logs(args) -> DataFrame:
    """Parse evaluation/search log files into a metrics DataFrame.

    For every eval log in ``args.train``: extract the architecture name,
    regularization weight, parameter size and final train/valid/test
    metrics; cross-reference the matching search log in ``args.search``
    for search-stage losses/accuracy; finally profile the rebuilt model.
    Rows with failed extraction are padded with None so columns align.

    Fix vs. original: ``auxiliary`` was decoded with ``bool(str)``, which
    is True for ANY non-empty match (including the string "False"); it is
    now compared against the literal "True".

    Returns the DataFrame (also written to ``args.output`` as CSV).
    """
    data = []
    for log in args.train:
        row = []
        try:
            # evaluation stage metrics
            lines = str(log.readlines())
            match = re.search(r"arch='(?P<name>.*?)'", lines)
            name = match.group("name")
            row.append(name)
            # l2_loss_2e01 -> 2e-01
            weight_value = float(name.split("_")[2].replace("e", "e-"))
            row.append(weight_value)
            match = re.search(r"param size.*?(?P<value>\d*\.\d+)MB", lines)
            param_size = float(match.group("value"))
            row.append(param_size)
            for metric in [
                    TRAIN_LOSS, TRAIN_ACC, VALID_LOSS, VALID_ACC, TEST_LOSS,
                    TEST_ACC
            ]:
                # Last occurrence = final-epoch value.
                value = float(
                    re.findall(rf'{metric}(?:uracy)? (?P<value>\d*\.\d+)',
                               lines)[-1])
                row.append(value)
        except Exception as e:
            print(f"Error '{e}' while processing file {log.name}")
            while len(row) < 9:
                row.append(None)
        try:
            # search stage metrics
            genotype = genotypes.__dict__[name]
            genotype_str = str(genotype)
            match = False
            for s_log in args.search:
                s_lines = str(s_log.readlines())
                s_log.seek(0, 0)  # rewind so the file can be re-scanned
                # ((?!\\n).)* = anything except new line escaped
                match = re.search(
                    r"stats = (?P<stats>{((?!\\n).)*" +
                    re.escape(genotype_str) + r".*?})\\n\",", s_lines)
                if match:
                    # SECURITY NOTE: eval on log-file content — only run
                    # this tool on trusted logs.
                    stats = eval(match.group("stats"))
                    # The loss that was NOT used carries weight -1.
                    # L2 loss case
                    if list(stats.get(L1_LOSS).keys())[0][0] == -1:
                        LOSS = L2_LOSS
                    # L1 loss case
                    elif list(stats.get(L2_LOSS).keys())[0][0] == -1:
                        LOSS = L1_LOSS
                    else:
                        raise Exception("L1 and L2 loss have w = -1")
                    values = list(stats.get(LOSS).values())[0]
                    search_criterion_loss = values[CRITERION_LOSS]
                    search_reg_loss = values[REG_LOSS]
                    row.append(search_criterion_loss)
                    row.append(search_reg_loss)
                    search_acc = values[VALID_ACC]
                    row.append(search_acc)
                    break
            if not match:
                raise Exception(f"Didn't find {name} on eval logs")
        except Exception as e:
            print(f"Error '{e}' while processing file {log.name}")
            while len(row) < 12:
                row.append(None)
        try:
            # model profiling
            genotype = genotypes.__dict__[name]
            match = re.search(r"init_channels=(?P<value>\d+)", lines)
            init_channels = int(match.group("value"))
            match = re.search(r"layers=(?P<value>\d+)", lines)
            layers = int(match.group("value"))
            match = re.search(r"drop_path_prob=(?P<value>\d+\.\d+)", lines)
            drop_path_prob = float(match.group("value"))
            match = re.search(r"auxiliary=(?P<value>\w+)", lines)
            # FIX: bool("False") is True — compare against the literal.
            auxiliary = match.group("value") == "True"
            model = NetworkCIFAR(init_channels, 10, layers, auxiliary,
                                 genotype)
            model.cuda()
            model.drop_path_prob = drop_path_prob
            parameters, net_flops, total_time_gpu, total_time_cpu = \
                model_profiling(model, name)
            row.append(parameters)
            row.append(net_flops)
            row.append(total_time_gpu)
            row.append(total_time_cpu)
        except Exception as e:
            print(f"Error '{e}' while processing file {log.name}")
        if len(row) > 0:
            data.append(row)
    df = pd.DataFrame(data,
                      columns=[
                          MODEL_NAME, WEIGHT, PARAMETERS_DARTS, TRAIN_LOSS,
                          TRAIN_ACC, VALID_LOSS, VALID_ACC, TEST_LOSS,
                          TEST_ACC, SEARCH_CRIT_LOSS, SEARCH_REG_LOSS,
                          SEARCH_ACC, PARAMETERS_OFA, FLOPS, LATENCY_GPU,
                          LATENCY_CPU
                      ])
    df.set_index(keys=MODEL_NAME, inplace=True)
    df.sort_values(by=WEIGHT, inplace=True, ascending=False)
    pd.set_option("display.max_rows", None, "display.max_columns", None,
                  "display.width", None)
    print(df)
    df.to_csv(args.output)
    return df
def main():
    """Train a discovered genotype on the NTU cross-view skeleton dataset.

    Uses module-level globals: args, genotypes, Network, NTU_CLASSES,
    dataset_ntuxview, transform, utils, train, infer.

    Fix vs. original: ``start_time`` was read (``end_time - start_time``)
    but never assigned inside this function, so the per-epoch duration was
    either a NameError or measured from some unrelated global; each epoch
    is now timed explicitly.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, NTU_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Hard-coded dataset paths — adjust for your machine.
    trainset = dataset_ntuxview.MyDataset(
        '/media/lab540/79eff75a-f78c-42f2-8902-9358e88bf654/lab540/Neura_auto_search/datasets/ntu112/ntu_cv/train.txt',
        transform=transform.ToTensor())
    train_queue = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=2)
    validset = dataset_ntuxview.MyDataset(
        '/media/lab540/79eff75a-f78c-42f2-8902-9358e88bf654/lab540/Neura_auto_search/datasets/ntu112/ntu_cv/test.txt',
        transform=transform.ToTensor())
    valid_queue = torch.utils.data.DataLoader(validset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        start_time = time.time()  # FIX: time each epoch explicitly
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linear drop-path ramp over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        if valid_acc > best_acc:
            best_acc = valid_acc
        logging.info('valid_acc %f, best_acc %f', valid_acc, best_acc)
        end_time = time.time()
        duration = end_time - start_time
        print('Epoch time: %ds.' % duration)
        # Checkpoint every epoch (overwrites the previous file).
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train a discovered genotype on radio-signal data loaded from a .mat file.

    Input samples are reshaped to (N, 1, 2, 128) IQ frames with 8 classes of
    22000 samples each; a 70/30 train/val split is used. Metrics go to
    TensorBoard and to the module-level file handles TAcc/Tloss/VAcc/Vloss
    (opened elsewhere — TODO confirm they are flushed/closed by the caller).
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # NOTE(review): eval on a CLI-provided string — trusted input only.
    genotype = eval("genotypes.%s" % args.arch)
    #print(genotype)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    #stat(model, (1, 16, 16))
    model = model.cuda()
    # input_size=(1,2,128)
    '''
    Input = torch.randn(1, 1, 2, 128)
    Input = Input.type(torch.cuda.FloatTensor)
    macs, params = profile(model, inputs=(Input,), custom_ops={Network: Network.forward})
    macs, params = clever_format([macs, params], "%.3f")
    print(macs, params)
    summary(model,(1,16,16))
    '''
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Disabled CIFAR data path kept for reference.
    '''
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.set=='cifar100':
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    #train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    #valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    '''
    # Load the raw IQ data; hard-coded path — adjust per machine.
    x = loadmat("/data/wx/PC-DARTS/data/datashujudoppler=100.mat")
    x = x.get('train_data')
    # assumes the .mat holds 8 classes * 22000 samples of 2x128 IQ frames
    # in class order — TODO confirm against the data-generation script.
    x = np.reshape(x, [-1, 1, 2, 128])
    data_num = 22000
    # Labels 0..7, one block of data_num per class, stacked in order.
    y1 = np.zeros([data_num, 1])
    y2 = np.ones([data_num, 1])
    y3 = np.ones([data_num, 1]) * 2
    y4 = np.ones([data_num, 1]) * 3
    y5 = np.ones([data_num, 1]) * 4
    y6 = np.ones([data_num, 1]) * 5
    y7 = np.ones([data_num, 1]) * 6
    y8 = np.ones([data_num, 1]) * 7
    y = np.vstack((y1, y2, y3, y4, y5, y6, y7, y8))
    y = np.array(y)
    # Fixed random_state keeps the split reproducible across runs.
    X_train, X_val, Y_train, Y_val = train_test_split(x,
                                                      y,
                                                      test_size=0.3,
                                                      random_state=30)
    X_train = torch.from_numpy(X_train)
    Y_train = torch.from_numpy(Y_train)
    X_train = X_train.type(torch.FloatTensor)
    Y_train = Y_train.type(torch.LongTensor)
    Y_train = Y_train.type(torch.LongTensor)
    # Y_train=np.reshape(Y_train,(16800,4))
    Y_train = Y_train.squeeze()  # (N, 1) -> (N,) as CrossEntropyLoss expects
    print(Y_train.type)
    print(Y_train)
    print(X_train.shape, Y_train.shape)
    train_Queue = torch.utils.data.TensorDataset(X_train, Y_train)
    print(train_Queue)
    X_val = torch.from_numpy(X_val)
    Y_val = torch.from_numpy(Y_val)
    X_val = X_val.type(torch.FloatTensor)
    Y_val = Y_val.type(torch.LongTensor)
    # Y_train = one_hot_embedding(Y_train, 4)
    Y_val = Y_val.type(torch.LongTensor)
    Y_val = Y_val.squeeze()
    print(Y_val.type, Y_val)
    valid_Queue = torch.utils.data.TensorDataset(X_val, Y_val)
    train_queue = torch.utils.data.DataLoader(train_Queue,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=2)
    valid_queue = torch.utils.data.DataLoader(valid_Queue,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    best_acc = 0.0
    writer = SummaryWriter('./logs/balei')
    '''
    Input = torch.randn(1, 1, 2, 128)
    Input=Input.type(torch.cuda.FloatTensor)
    macs, params = profile(model, inputs=(Input,),custom_ops={Network:Network.forward})
    macs, params = clever_format([macs, params], "%.3f")
    print(macs, params)
    '''
    for epoch in range(args.epochs):
        # Original DARTS ordering: advance LR before training the epoch.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        writer.add_scalar('Train/acc', train_acc, epoch)
        TAcc.write(str(train_acc) + ",")
        writer.add_scalar('Train/loss', train_obj, epoch)
        Tloss.write(str(train_obj) + ",")
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        # Track and checkpoint the best-validation model separately.
        if valid_acc > best_acc:
            best_acc = valid_acc
            utils.save(model, os.path.join(args.save, 'weights_best_acc.pt'))
        logging.info('valid_acc %f, best_acc %f', valid_acc, best_acc)
        writer.add_scalar('Valid/acc', valid_acc, epoch)
        VAcc.write(str(valid_acc) + ",")
        writer.add_scalar('Valid/loss', valid_obj, epoch)
        Vloss.write(str(valid_obj) + ",")
        # Latest-epoch checkpoint (overwritten each epoch).
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train on noisy-label CIFAR-10/100 with gold/noisy splits.

    Selects ResNet variants or a DARTS genotype by ``args.arch``, builds
    noisy and gold (clean) training sets, and evaluates on valid + test
    every epoch. Globals used: args, genotypes, Network, ResNet18/34/50,
    CIFAR10/CIFAR100 (corrupted loaders), CIFAR_CLASSES, utils, train,
    infer, infer_valid.

    Fix vs. original: the invalid-loss error message used
    ``"... Must be in {'cce', 'rll'}".format(...)`` — the unescaped braces
    make ``str.format`` raise while building the message. Braces are now
    escaped with ``{{ }}``.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    random.seed(args.seed)
    np.random.seed(
        args.data_seed)  # cutout and load_corrupted_data use np.random
    torch.cuda.set_device(args.gpu)
    # Deterministic cuDNN for reproducible corrupted-label experiments.
    cudnn.benchmark = False
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    cudnn.deterministic = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    if args.arch == 'resnet':
        model = ResNet18(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet50':
        model = ResNet50(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet34':
        model = ResNet34(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    else:
        # NOTE: eval on a CLI-provided string — trusted input only.
        genotype = eval("genotypes.%s" % args.arch)
        model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                        args.auxiliary, genotype)
        model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    train_transform, test_transform = utils._data_transforms_cifar10(args)
    # Load dataset: gold_* is fully clean, noisy_* carries label corruption.
    if args.dataset == 'cifar10':
        noisy_train_data = CIFAR10(root=args.data,
                                   train=True,
                                   gold=False,
                                   gold_fraction=0.0,
                                   corruption_prob=args.corruption_prob,
                                   corruption_type=args.corruption_type,
                                   transform=train_transform,
                                   download=True,
                                   seed=args.data_seed)
        gold_train_data = CIFAR10(root=args.data,
                                  train=True,
                                  gold=True,
                                  gold_fraction=1.0,
                                  corruption_prob=args.corruption_prob,
                                  corruption_type=args.corruption_type,
                                  transform=train_transform,
                                  download=True,
                                  seed=args.data_seed)
        test_data = dset.CIFAR10(root=args.data,
                                 train=False,
                                 download=True,
                                 transform=test_transform)
    elif args.dataset == 'cifar100':
        noisy_train_data = CIFAR100(root=args.data,
                                    train=True,
                                    gold=False,
                                    gold_fraction=0.0,
                                    corruption_prob=args.corruption_prob,
                                    corruption_type=args.corruption_type,
                                    transform=train_transform,
                                    download=True,
                                    seed=args.data_seed)
        gold_train_data = CIFAR100(root=args.data,
                                   train=True,
                                   gold=True,
                                   gold_fraction=1.0,
                                   corruption_prob=args.corruption_prob,
                                   corruption_type=args.corruption_type,
                                   transform=train_transform,
                                   download=True,
                                   seed=args.data_seed)
        test_data = dset.CIFAR100(root=args.data,
                                  train=False,
                                  download=True,
                                  transform=test_transform)
    num_train = len(gold_train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))
    # First `split` indices train, the remainder validate.
    if args.gold_fraction == 1.0:
        train_data = gold_train_data
    else:
        train_data = noisy_train_data
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True,
        num_workers=0)
    if args.clean_valid:
        valid_data = gold_train_data
    else:
        valid_data = noisy_train_data
    valid_queue = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True,
        num_workers=0)
    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    if args.loss_func == 'cce':
        criterion = nn.CrossEntropyLoss().cuda()
    elif args.loss_func == 'rll':
        criterion = utils.RobustLogLoss(alpha=args.alpha).cuda()
    elif args.loss_func == 'forward_gold':
        corruption_matrix = train_data.corruption_matrix
        criterion = utils.ForwardGoldLoss(corruption_matrix=corruption_matrix)
    else:
        # FIX: braces escaped so str.format no longer raises on the message.
        # (NOTE: assert is stripped under `python -O`.)
        assert False, "Invalid loss function '{}' given. Must be in {{'cce', 'rll'}}".format(
            args.loss_func)
    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer_valid(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        test_acc, test_obj = infer(test_queue, model, criterion)
        logging.info('test_acc %f', test_acc)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def process_logs(args) -> DataFrame:
    """Parse a search-stage log into a per-weight metrics DataFrame.

    Reads ``args.log`` (an open file), determines whether L1 or L2
    regularization was used, extracts the last recorded ``hist`` dict,
    drops dominated samples, and profiles each genotype. The result is
    printed, written to ``args.output`` as CSV, and returned.
    """
    # filter logs
    lines = str(args.log.readlines())
    match = re.search(r"Selected regularization(?P<reg>.*?)\\n", lines)
    reg = match.group("reg")
    if L1_LOSS in reg:
        loss = L1_LOSS
    elif L2_LOSS in reg:
        loss = L2_LOSS
    else:
        raise RuntimeError("Cant decode line Selected regularization")
    # Take the LAST "hist = ..." occurrence = final state of the search.
    match = re.finditer(r"hist = (?P<hist>.*?)\\n", lines)
    hist_str = list(match)[-1].group("hist")
    # SECURITY NOTE: eval on log-file content — trusted logs only.
    hist = eval(hist_str)[loss]
    print("Removing non-optimal samples")
    # filter out dominated points (filter_hist mutates `hist` in place —
    # TODO confirm against its definition)
    filter_hist(hist)
    data = []
    for weight, result in hist.items():
        row = []
        name = create_genotype_name(weight, loss)
        try:
            row.append(name)
            weight_value = weight[0]  # weight is a tuple; first item = value
            row.append(weight_value)
            # Each `result` looks like:
            # {'train_acc': 25.035999994506835,
            #  'valid_acc': 20.171999999084473,
            #  'reg_loss': 16.01249122619629,
            #  'criterion_loss': 1.9922981262207031,
            #  'model_size': 1.81423,
            #  'genotype': Genotype(..)}
            for metric in [
                    SIZE, TRAIN_ACC, VALID_ACC, CRITERION_LOSS, REG_LOSS
            ]:
                row.append(result[metric])
        except Exception as e:
            print(f"Error '{e}' while processing file {args.log} w={weight}")
            # Pad so later columns still line up.
            while len(row) < 7:
                row.append(None)
        try:
            # model profiling
            genotype = result[GENOTYPE]
            # using default from train.py for CIFAR10
            model = NetworkCIFAR(36, 10, 20, False, genotype)
            model.cuda()
            model.drop_path_prob = 0.3
            parameters, net_flops, total_time_gpu, total_time_cpu = \
                model_profiling(model, name)
            row.append(parameters)
            row.append(net_flops)
            row.append(total_time_gpu)
            row.append(total_time_cpu)
        except Exception as e:
            # Profiling failures are fatal (unlike metric extraction above).
            print(f"Error '{e}' while processing file {args.log} w={weight}")
            raise e
        if len(row) > 0:
            data.append(row)
    # NOTE(review): mixes constant column names with string literals
    # ("Params", "Parameters", "Latency GPU") — verify downstream readers.
    df = pd.DataFrame(data,
                      columns=[
                          MODEL_NAME, WEIGHT, "Params", SEARCH_TRAIN_ACC,
                          SEARCH_VAL_ACC, SEARCH_CRIT_LOSS, SEARCH_REG_LOSS,
                          "Parameters", FLOPS, "Latency GPU", LATENCY_CPU
                      ])
    df.set_index(keys=MODEL_NAME, inplace=True)
    df.sort_values(by=WEIGHT, inplace=True, ascending=False)
    pd.set_option("display.max_rows", None, "display.max_columns", None,
                  "display.width", None)
    print(df)
    df.to_csv(args.output)
    return df
def main():
    """Evaluate a saved checkpoint on the task's test set.

    The genotype is resolved from, in priority order: a ``genotype.txt``
    next to the checkpoint, a ``genoname.txt`` naming an entry in
    ``genotypes``, or the ``genotypes.ADMM`` fallback. Globals used: args,
    utils, genotypes, Network, CIFAR_CLASSES, infer.

    Fix vs. original: the unconditional CIFAR-10 ``test_data``/``test_queue``
    construction before the task branches was dead work — every branch
    rebuilt the queue — so it has been removed.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    torch.cuda.empty_cache()
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Prefer a serialized genotype stored next to the checkpoint.
    genotype_path = os.path.join(utils.get_dir(),
                                 os.path.split(args.model_path)[0],
                                 'genotype.txt')
    print(genotype_path)
    if os.path.isfile(genotype_path):
        with open(genotype_path, "r") as f:
            geno_raw = f.read()
        # SECURITY NOTE: eval of file content — trusted checkpoints only.
        genotype = eval(geno_raw)
    else:
        genoname = os.path.join(utils.get_dir(),
                                os.path.split(args.model_path)[0],
                                'genoname.txt')
        if os.path.isfile(genoname):
            with open(genoname, "r") as f:
                args.arch = f.read()
            genotype = eval("genotypes.%s" % args.arch)
        else:
            genotype = eval("genotypes.ADMM")
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, os.path.join(utils.get_dir(), args.model_path))
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    datapath = os.path.join(utils.get_dir(), args.data)
    if args.task == "CIFAR100cf":
        # Coarse/fine CIFAR-100 variant with a filtered test subset.
        _, test_transform = utils._data_transforms_cifar100(args)
        test_data = utils.CIFAR100C2F(root=datapath,
                                      train=False,
                                      download=True,
                                      transform=test_transform)
        test_indices = test_data.filter_by_fine(args.test_filter)
        test_queue = torch.utils.data.DataLoader(
            torch.utils.data.Subset(test_data, test_indices),
            batch_size=args.batch_size,
            shuffle=False,
            pin_memory=True,
            num_workers=2)
        # TODO: extend each epoch or multiply number of epochs by 20%*args.class_filter
    else:
        if args.task == "CIFAR100":
            _, test_transform = utils._data_transforms_cifar100(args)
            test_data = dset.CIFAR100(root=datapath,
                                      train=False,
                                      download=True,
                                      transform=test_transform)
        else:
            _, test_transform = utils._data_transforms_cifar10(args)
            test_data = dset.CIFAR10(root=datapath,
                                     train=False,
                                     download=True,
                                     transform=test_transform)
        test_queue = torch.utils.data.DataLoader(test_data,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 pin_memory=True,
                                                 num_workers=2)
    # Fixed (not ramped) drop-path probability at evaluation time.
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Train an architecture given as a literal Genotype string in args.arch.

    Saves the best-validation weights and finally reports the genotype with
    its last validation accuracy via dump_results (defined elsewhere).
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)
    # args.arch holds a full Genotype expression here (not a name).
    # SECURITY NOTE: eval on CLI input — trusted use only.
    genotype = eval(args.arch)
    print(genotype)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    total_params = sum(p.numel() for p in model.parameters()
                       if p.requires_grad)
    print(total_params)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # NOTE(review): called WITHOUT args, unlike sibling scripts — confirm
    # this utils version takes no argument.
    train_transform, valid_transform = utils._data_transforms_cifar10()
    train_data = dset.CIFAR10(root=args.data,
                              train=True,
                              download=True,
                              transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data,
                              train=False,
                              download=True,
                              transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=32,
    )
    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=32)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    max_acc = 0
    for epoch in range(args.epochs):
        # Constant drop-path here (the linear ramp is commented out).
        model.drop_path_prob = args.drop_path_prob
        # model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        # step() after train() — the modern PyTorch scheduler ordering.
        scheduler.step()
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        logging.info('max acc:{}'.format(max_acc))
        if valid_acc > max_acc:
            logging.info('Update Max Acc')
            max_acc = valid_acc
            # NOTE(review): `local_save` is not defined in this function —
            # presumably a module-level path; verify it exists at runtime.
            utils.save(model, os.path.join(local_save, 'weights.pt'))
    # Reports the LAST epoch's valid_acc (not max_acc) — confirm intent.
    dump_results((genotype, valid_acc / 100))
def main():
    """Train a discovered genotype on the K49 (Kuzushiji-49) dataset.

    Splits the K49 training set into train/validation portions via
    SubsetRandomSampler and runs the standard DARTS evaluation loop.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility + device setup.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Build the network from the named genotype and move it to the GPU.
    chosen_genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, chosen_genotype).cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # K49 data: a single training set split into train/valid portions.
    # (The original CIFAR-10 loading path was removed as dead code.)
    data_dir = '../data/kmnist/'
    data_augmentations = transforms.ToTensor()
    train_dataset = K49(data_dir, True, data_augmentations)
    #test_dataset = K49(data_dir, False, data_augmentations)

    sample_count = len(train_dataset)
    all_indices = list(range(sample_count))
    cut = int(np.floor(args.train_portion * sample_count))

    loader_kwargs = dict(dataset=train_dataset,
                         batch_size=args.batch_size,
                         pin_memory=True,
                         num_workers=2)
    train_queue = torch.utils.data.DataLoader(
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            all_indices[:cut]),
        **loader_kwargs)
    valid_queue = torch.utils.data.DataLoader(
        sampler=torch.utils.data.sampler.SubsetRandomSampler(
            all_indices[cut:sample_count]),
        **loader_kwargs)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    for epoch in range(args.epochs):
        scheduler.step()  # original DARTS ordering: advance LR first
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability grows linearly over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train a genotype on CIFAR-10/100 with DataParallel across all GPUs.

    Globals: args, unparsed, genotypes, Network, CIFAR_CLASSES, utils,
    train, infer. Saves the unwrapped module's weights every epoch.
    """
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    logging.info("unparsed args = %s", unparsed)
    # NOTE(review): num_gpus is computed but never used below.
    num_gpus = torch.cuda.device_count()
    # NOTE(review): eval on a CLI-provided string — trusted input only.
    genotype = eval("genotypes.%s" % args.arch)
    print('---------Genotype---------')
    logging.info(genotype)
    print('--------------------------')
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    # Wrap for multi-GPU; the raw network lives at model.module.
    model = torch.nn.DataParallel(model)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(
            args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.cifar100:
        train_data = dset.CIFAR100(root=args.tmp_data_dir,
                                   train=True,
                                   download=True,
                                   transform=train_transform)
        valid_data = dset.CIFAR100(root=args.tmp_data_dir,
                                   train=False,
                                   download=True,
                                   transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.tmp_data_dir,
                                  train=True,
                                  download=True,
                                  transform=train_transform)
        valid_data = dset.CIFAR10(root=args.tmp_data_dir,
                                  train=False,
                                  download=True,
                                  transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=args.workers)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        # Original DARTS ordering: advance LR before training the epoch.
        scheduler.step()
        logging.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # Linear drop-path ramp, set on the wrapped network; the second
        # assignment on the DataParallel wrapper itself is redundant but
        # harmless (the forward pass reads model.module.drop_path_prob).
        model.module.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        start_time = time.time()
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('Train_acc: %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        if valid_acc > best_acc:
            best_acc = valid_acc
        logging.info('Valid_acc: %f', valid_acc)
        end_time = time.time()
        duration = end_time - start_time
        print('Epoch time: %ds.' % duration)
        # Save the unwrapped module so the checkpoint is DataParallel-free.
        utils.save(model.module, os.path.join(args.save, 'weights.pt'))
def main():
    """BN-gamma pruning statistics + multi-SNR test sweep for a trained model.

    Masks the smallest 50% of BatchNorm gammas (statistics only — see note
    below), loads the checkpoint, then evaluates on .mat test sets for SNRs
    0..20 dB, reporting per-SNR and average accuracy/latency.

    Fixes vs. original: the accumulator ``sum`` shadowed the builtin (now
    ``acc_sum``), and ``utils.load(model, args.model_path)`` was called
    twice — the duplicate is removed.

    NOTE(review): utils.load runs AFTER the BN weights are masked, so the
    checkpoint overwrites the pruning — the pruning pass only prints
    channel statistics. Confirm this is intended.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # NOTE: eval on a CLI-provided string — trusted input only.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    # Collect all BatchNorm gamma magnitudes to find the global median.
    total = 0
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            total += m.weight.data.shape[0]
    bn = torch.zeros(total)
    index = 0
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            size = m.weight.data.shape[0]
            bn[index:(index + size)] = m.weight.data.abs().clone()
            index += size
    sorted_gammas, _ = torch.sort(bn)
    thre_index = int(total * 0.5)  # prune the smaller half of channels
    thre = sorted_gammas[thre_index]
    pruned = 0
    cfg = []
    cfg_mask = []
    for k, m in enumerate(model.modules()):
        if isinstance(m, nn.BatchNorm2d):
            weight_copy = m.weight.data.clone()
            mask = weight_copy.abs().gt(thre).float().cuda()
            pruned = pruned + mask.shape[0] - torch.sum(mask)
            m.weight.data.mul_(mask)
            m.bias.data.mul_(mask)
            cfg.append(int(torch.sum(mask)))
            cfg_mask.append(mask.clone())
            print(
                'layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'
                .format(k, mask.shape[0], int(torch.sum(mask))))
        elif isinstance(m, nn.MaxPool2d):
            cfg.append('M')
    pruned_ratio = pruned / total  # fraction of channels zeroed (stats only)
    print('Pre-processing Successful!')
    # Restore trained weights (overwrites the masks applied above).
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    snr = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
    acc_sum = 0  # FIX: renamed from `sum` (shadowed the builtin)
    TIM = 0
    for i in snr:
        data_num = 1000
        test_data = loadmat("/data/wx/PC-DARTS/data/testdoppler=100/snr=" +
                            str(i) + ".mat")
        x = test_data.get("train_data")
        print(x.shape)
        x = np.reshape(x, [-1, 1, 2, 128])
        # Labels: 8 classes, data_num samples each, stacked in order.
        y1 = np.zeros([data_num, 1])
        y2 = np.ones([data_num, 1])
        y3 = np.ones([data_num, 1]) * 2
        y4 = np.ones([data_num, 1]) * 3
        y5 = np.ones([data_num, 1]) * 4
        y6 = np.ones([data_num, 1]) * 5
        y7 = np.ones([data_num, 1]) * 6
        y8 = np.ones([data_num, 1]) * 7
        y = np.vstack((y1, y2, y3, y4, y5, y6, y7, y8))
        y = np.array(y)
        X_test = torch.from_numpy(x)
        Y_test = torch.from_numpy(y)
        X_test = X_test.type(torch.FloatTensor)
        Y_test = Y_test.type(torch.LongTensor)
        Y_test = Y_test.squeeze()  # (N, 1) -> (N,) for CrossEntropyLoss
        test_Queue = torch.utils.data.TensorDataset(X_test, Y_test)
        test_queue = torch.utils.data.DataLoader(test_Queue,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 pin_memory=True,
                                                 num_workers=2)
        model.drop_path_prob = args.drop_path_prob
        if i == 0:
            # One-off FLOPs/params report on the first SNR pass.
            Input = torch.randn(1, 1, 2, 128)
            Input = Input.type(torch.cuda.FloatTensor)
            macs, params = profile(model, inputs=(Input, ))
            macs, params = clever_format([macs, params], "%.3f")
            print("flops params")
            print(macs, params)
            summary(model, input_size=(1, 2, 128))
        time1 = time.time()
        test_acc, test_obj, target, loggg = infer(test_queue, model,
                                                  criterion)
        time2 = time.time() - time1
        logging.info('第 %d snr test_acc %f', i, test_acc)
        acc_sum += test_acc
        logging.info("第 %d snr time: %f", i, time2)
        TIM += time2
        target = target.cpu().detach().numpy()
        loggg = loggg.cpu().detach().numpy()
        # Confusion matrix computed per SNR (plotting code was removed).
        cm = confusion_matrix(target, loggg)
    # Averages over the 11 SNR points.
    ACC = acc_sum / 11
    TT = TIM / 11
    print("average acc : ", ACC)
    print("average time : ", TT)
def main():
    """Standard DARTS evaluation loop on CIFAR-10 (comments translated)."""
    # Check whether a GPU is available
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)  # select the current CUDA device
    cudnn.benchmark = True  # enable cuDNN autotuner to speed up computation
    torch.manual_seed(args.seed)  # seed the CPU RNG
    cudnn.enabled=True  # enable cuDNN (GPU-accelerated NN primitives)
    torch.cuda.manual_seed(args.seed)  # seed the current GPU RNG
    # Log run configuration
    logging.info('gpu device = %d' % args.gpu)  # gpu device = 0
    logging.info("args = %s", args)
    '''
    args = Namespace(arch='DARTS', auxiliary=False, auxiliary_weight=0.4,
    batch_size=96, cutout=False, cutout_length=16, data='../data',
    drop_path_prob=0.2, epochs=600, gpu=0, grad_clip=5, init_channels=36,
    layers=20, learning_rate=0.025, model_path='saved_models', momentum=0.9,
    report_freq=50, save='eval-EXP-20190618-170816', seed=0,
    weight_decay=0.0003)
    '''
    # Resolve the architecture by name: eval() evaluates the string
    # expression and returns the genotype object.
    genotype = eval("genotypes.%s" % args.arch)
    # from model import NetworkCIFAR as Network  (module-level import)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    # model = Network(init channels=36, CIFAR_CLASSES=10, layers=20,
    #                 args.auxiliary = use the auxiliary tower,
    #                 genotype = the architecture)
    model = model.cuda()
    # Log the model's parameter size (memory footprint)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # param size = 3.349342MB
    criterion = nn.CrossEntropyLoss()  # define the loss function
    criterion = criterion.cuda()
    # Define the optimizer
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    # Get preprocessed transforms for the training and validation sets
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    # Fetch the datasets
    train_data = dset.CIFAR10(root=args.data, train=True, download=True,
                              transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True,
                              transform=valid_transform)
    '''
    Files already downloaded and verified
    Files already downloaded and verified
    '''
    # Wrap the data as Tensors; used mainly for reading the datasets
    '''
    pin_memory:If True, the data loader will copy tensors into CUDA pinned memory before returning them,在数据返回前,是否将数据复制到CUDA内存中
    num_workers:加快数据导入速度,工作者数量,默认是0。使用多少个子进程来导入数据。设置为0,就是使用主进程来导入数据。注意:这个数字必须是大于等于0的,不能太大,2的时候报错
    '''
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True,
        pin_memory=True, num_workers=1)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False,
        pin_memory=True, num_workers=1)
    # LR policy: CosineAnnealingLR (cosine-annealed learning rate)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           float(args.epochs))
    # default epochs=600
    for epoch in range(args.epochs):
        scheduler.step()  # advance the learning-rate schedule
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # epoch 0 lr 2.500000e-02
        # Drop-path: probability scales with training progress, up to 0.2
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        # Call the train() function defined below
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        '''
        train_queue: the queue to train on
        model: the model used
        criterion: the loss function
        optimizer: the optimizer used
        '''
        logging.info('train_acc %f', train_acc)  # this epoch's train accuracy
        # Compute accuracy on the validation set
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        # Save the model parameters
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train a model (searched cell or ResNet baseline) on CIFAR-10 with
    label corruption, logging accuracy/loss on clean and noisy splits.

    Reads configuration from the module-level ``args`` namespace; exits
    with status 1 when no CUDA device is available.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Seed every RNG and force deterministic cuDNN so the label corruption
    # and sampler order are reproducible across runs.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    random.seed(args.seed)

    # Select the architecture. ResNet baselines have no auxiliary head,
    # so the flag is forced off; any other name is looked up as a genotype.
    if args.arch == 'resnet':
        model = ResNet18(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet50':
        model = ResNet50(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet34':
        model = ResNet34(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    else:
        # Attribute lookup instead of eval(): same result for any valid
        # genotype name, but a CLI string can no longer execute code.
        genotype = genotypes.__dict__[args.arch]
        model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_transform, test_transform = utils._data_transforms_cifar10(args)
    # Two views of the training set: `train_data` with corrupted labels
    # (gold_fraction=0.0) and `gold_train_data` with clean labels
    # (gold_fraction=1.0), sharing the same seed so indices line up.
    train_data = CIFAR10(
        root=args.data, train=True, gold=False, gold_fraction=0.0,
        corruption_prob=args.corruption_prob, corruption_type=args.corruption_type,
        transform=train_transform, download=True, seed=args.seed)
    gold_train_data = CIFAR10(
        root=args.data, train=True, gold=True, gold_fraction=1.0,
        corruption_prob=args.corruption_prob, corruption_type=args.corruption_type,
        transform=train_transform, download=True, seed=args.seed)

    # Split the index range once: the first portion trains, the rest validates.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    clean_train_queue = torch.utils.data.DataLoader(
        gold_train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=2)
    noisy_train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=2)
    clean_valid_queue = torch.utils.data.DataLoader(
        gold_train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True, num_workers=2)
    noisy_valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True, num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    # Pick the training loss.
    if args.loss_func == 'cce':
        criterion = nn.CrossEntropyLoss().cuda()
    elif args.loss_func == 'rll':
        criterion = utils.RobustLogLoss(alpha=args.alpha).cuda()
    elif args.loss_func == 'forward_gold':
        corruption_matrix = train_data.corruption_matrix
        criterion = utils.ForwardGoldLoss(corruption_matrix=corruption_matrix)
    else:
        # BUG FIX: the original used str.format on a message containing the
        # unescaped braces {'cce', 'rll'}, which str.format parses as a
        # replacement field and raises KeyError before the message is shown;
        # it also omitted the valid 'forward_gold' option. Raise ValueError
        # instead of `assert False` so the check survives `python -O`.
        raise ValueError(
            "Invalid loss function '{}' given. "
            "Must be in {{'cce', 'rll', 'forward_gold'}}".format(args.loss_func))

    global gain
    for epoch in range(args.epochs):
        if args.random_weight:
            # Diagnostic mode: no gradient updates — just measure losses on
            # clean vs. noisy data and draw a random gain for this epoch.
            logging.info('Epoch %d, Randomly assign weights', epoch)
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
            clean_obj, noisy_obj = infer_random_weight(
                clean_train_queue, noisy_train_queue, model, criterion)
            logging.info('clean loss %f, noisy loss %f', clean_obj, noisy_obj)
            gain = np.random.randint(1, args.grad_clip, size=1)[0]
        else:
            scheduler.step()
            logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
            # Drop-path probability ramps linearly over training.
            model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

            train_acc, train_obj, another_obj = train(
                clean_train_queue, noisy_train_queue, model, criterion, optimizer)
            # train() returns (acc, primary loss, other loss); which loss is
            # "clean" depends on which queue was trained on.
            if args.clean_train:
                logging.info('train_acc %f, clean_loss %f, noisy_loss %f',
                             train_acc, train_obj, another_obj)
            else:
                logging.info('train_acc %f, clean_loss %f, noisy_loss %f',
                             train_acc, another_obj, train_obj)

            utils.save(model, os.path.join(args.save, 'weights.pt'))

        # Evaluate on the held-out portion of both label views every epoch.
        clean_valid_acc, clean_valid_obj = infer_valid(clean_valid_queue, model, criterion)
        logging.info('clean_valid_acc %f, clean_valid_loss %f',
                     clean_valid_acc, clean_valid_obj)
        noisy_valid_acc, noisy_valid_obj = infer_valid(noisy_valid_queue, model, criterion)
        logging.info('noisy_valid_acc %f, noisy_valid_loss %f',
                     noisy_valid_acc, noisy_valid_obj)