# Start from a clean results directory: wipe any previous output, then recreate it
if os.path.exists(save_path):
    shutil.rmtree(save_path)
os.makedirs(save_path)

if config.dataset_from_folder:
    train_dataloaders, val_dataloaders, train_labels_number, _ = get_dataloader_from_folder(
        data_root, config.image_size, transforms, mean, std,
        config.batch_size, config.multi_scale)
    # Wrap the single folder split in lists so it can be iterated like k folds
    train_dataloaders, val_dataloaders, train_labels_number_folds = \
        [train_dataloaders], [val_dataloaders], [train_labels_number]
else:
    get_dataloader = GetDataloader(
        data_root,
        folds_split=folds_split,
        test_size=test_size,
        choose_dataset=config.choose_dataset,
        load_split_from_file=config.load_split_from_file)
    train_dataloaders, val_dataloaders, _, _ = get_dataloader.get_dataloader(
        config.batch_size, config.image_size, mean, std, transforms=transforms)

# Run the demo/prediction step only on the folds selected in the config
for fold_index, (train_loader, valid_loader) in enumerate(zip(train_dataloaders, val_dataloaders)):
    if fold_index in config.selected_fold:
        demo_predicts = DemoResults(
            config, weight_path,
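# --- Illustrative sketch (not part of the original scripts) ---
# The save_path handling above clears out any previous results before new ones
# are written. A minimal, standalone version of that pattern, using only the
# standard library, could look like the helper below; `reset_dir` is a
# hypothetical name introduced here purely for illustration.
import os
import shutil


def reset_dir(path):
    """Remove `path` if it exists, then recreate it empty."""
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)


# Example: reset_dir('./demo_results')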
        gray_prob=config.gray_prob)
else:
    transforms = None

if config.dataset_from_folder:
    train_dataloaders, val_dataloaders = get_dataloader_from_folder(
        data_root, config.image_size, transforms, mean, std, config.batch_size,
        only_official, only_self, multi_scale, config.auto_aug)
    train_dataloaders, val_dataloaders = [train_dataloaders], [val_dataloaders]
else:
    get_dataloader = GetDataloader(
        data_root,
        folds_split=folds_split,
        test_size=test_size,
        only_self=only_self,
        only_official=only_official,
        selected_labels=selected_labels,
        val_official=val_official,
        load_split_from_file=load_split_from_file,
        auto_aug=auto_aug)
    train_dataloaders, val_dataloaders = get_dataloader.get_dataloader(
        config.batch_size, config.image_size, mean, std,
        transforms=transforms, multi_scale=multi_scale, val_multi_scale=val_multi_scale)

for fold_index, (train_loader, valid_loader) in enumerate(zip(train_dataloaders, val_dataloaders)):
        pickle.dump({'seed': seed}, f, -1)
    return writer, TIMESTAMP


if __name__ == "__main__":
    config = get_classify_config()
    config.lr = 3e-4  # override the learning rate from the config
    data_root = config.dataset_root
    folds_split = config.n_splits
    test_size = config.val_size
    # ImageNet channel-wise statistics used to normalize the inputs
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)

    if config.augmentation_flag:
        transforms = DataAugmentation(config.erase_prob, full_aug=True, gray_prob=config.gray_prob)
    else:
        transforms = None

    get_dataloader = GetDataloader(data_root, folds_split=folds_split, test_size=test_size)
    train_dataloaders, val_dataloaders = get_dataloader.get_dataloader(
        config.batch_size, config.image_size, mean, std, transforms=transforms)

    # Train only the folds listed in config.selected_fold
    for fold_index, (train_loader, valid_loader) in enumerate(zip(train_dataloaders, val_dataloaders)):
        if fold_index in config.selected_fold:
            train_val = TrainVal(config, fold_index)
            train_val.train(train_loader, valid_loader)
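# --- Illustrative sketch (not part of the original scripts) ---
# The mean/std tuples above are the standard ImageNet channel statistics. The
# project's GetDataloader presumably applies them internally when it builds the
# loaders; for reference only, a plain torchvision pipeline would use them like
# this (assumes torchvision is available; the fixed 224x224 size is an example,
# the scripts actually use config.image_size).
from torchvision import transforms as T

imagenet_mean = (0.485, 0.456, 0.406)
imagenet_std = (0.229, 0.224, 0.225)

reference_transform = T.Compose([
    T.Resize((224, 224)),                      # example size only
    T.ToTensor(),                              # HWC uint8 -> CHW float in [0, 1]
    T.Normalize(imagenet_mean, imagenet_std),  # per-channel normalization
])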
        config.data_local, config.image_size, transforms, mean, std,
        config.batch_size, multi_scale,
    )
    # Wrap the single folder split in lists so it can be iterated like k folds
    train_dataloaders, val_dataloaders, train_labels_number_folds = \
        [train_dataloaders], [val_dataloaders], [train_labels_number]
else:
    get_dataloader = GetDataloader(
        config.data_local,
        folds_split=folds_split,
        test_size=test_size,
        label_names_path=config.local_data_root + 'label_id_name.json',
        choose_dataset=config.choose_dataset,
        load_split_from_file=config.load_split_from_file)
    train_dataloaders, val_dataloaders, train_labels_number_folds, _ = get_dataloader.get_dataloader(
        config.batch_size, config.image_size, mean, std,
        transforms=transforms, multi_scale=multi_scale, draw_distribution=False)

for fold_index, (train_loader, valid_loader, train_labels_number) in enumerate(
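# --- Illustrative sketch (not part of the original scripts) ---
# Each fold above also yields train_labels_number, the per-class sample counts
# of that fold's training split. This fragment does not show how the repository
# uses those counts; one common use of per-class counts is to derive class
# weights for a weighted loss, e.g. with PyTorch (the helper name below is
# hypothetical):
import torch


def class_weights_from_counts(counts):
    """Inverse-frequency class weights: rarer classes get larger weights."""
    counts = torch.tensor(counts, dtype=torch.float32)
    weights = 1.0 / counts
    return weights * (len(counts) / weights.sum())  # rescale so the mean weight is 1


# Example: criterion = torch.nn.CrossEntropyLoss(weight=class_weights_from_counts([120, 80, 40]))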