def ukbb_sax_transform(self):
    """Return the train/valid transform pipelines for UKBB short-axis data.

    Both pipelines share the same preamble (pad -> tensor -> channels-first
    -> float cast) and the same normalization; training additionally applies
    random flips/affines and a random crop, while validation uses a
    deterministic corner crop (crop_type=0) so results are reproducible.

    Returns:
        dict: {'train': ts.Compose, 'valid': ts.Compose}
    """
    # Training pipeline: stochastic augmentation before the crop.
    # interp is a pair: bilinear for the image, nearest for the label mask
    # so label values are never blended.
    train_pipeline = ts.Compose([
        ts.PadNumpy(size=self.scale_size),
        ts.ToTensor(),
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'float']),
        ts.RandomFlip(h=True, v=True, p=self.random_flip_prob),
        ts.RandomAffine(rotation_range=self.rotate_val,
                        translation_range=self.shift_val,
                        zoom_range=self.scale_val,
                        interp=('bilinear', 'nearest')),
        # norm_flag=(True, False): normalize the image only, not the label.
        ts.NormalizeMedicPercentile(norm_flag=(True, False)),
        ts.RandomCrop(size=self.patch_size),
        ts.TypeCast(['float', 'long']),
    ])

    # Validation pipeline: no randomness, fixed crop position.
    valid_pipeline = ts.Compose([
        ts.PadNumpy(size=self.scale_size),
        ts.ToTensor(),
        ts.ChannelsFirst(),
        ts.TypeCast(['float', 'float']),
        ts.NormalizeMedicPercentile(norm_flag=(True, False)),
        ts.SpecialCrop(size=self.patch_size, crop_type=0),
        ts.TypeCast(['float', 'long']),
    ])

    return {'train': train_pipeline, 'valid': valid_pipeline}
def ultrasound_transform(self):
    """Return the train/valid transform pipelines for ultrasound images.

    Single-input pipelines (no label channel): tensorize, cast to float,
    add a leading channel axis, take a deterministic crop, then (train only)
    apply random horizontal flips and affines before standardization.

    Returns:
        dict: {'train': ts.Compose, 'valid': ts.Compose}
    """
    # Training pipeline: crop first, then augment, then standardize.
    # v=False: no vertical flips for ultrasound; interp='bilinear' applies
    # to the single image input.
    train_pipeline = ts.Compose([
        ts.ToTensor(),
        ts.TypeCast(['float']),
        ts.AddChannel(axis=0),
        ts.SpecialCrop(self.patch_size, 0),
        ts.RandomFlip(h=True, v=False, p=self.random_flip_prob),
        ts.RandomAffine(rotation_range=self.rotate_val,
                        translation_range=self.shift_val,
                        zoom_range=self.scale_val,
                        interp=('bilinear')),
        ts.StdNormalize(),
    ])

    # Validation pipeline: same preprocessing without augmentation.
    valid_pipeline = ts.Compose([
        ts.ToTensor(),
        ts.TypeCast(['float']),
        ts.AddChannel(axis=0),
        ts.SpecialCrop(self.patch_size, 0),
        ts.StdNormalize(),
    ])

    return {'train': train_pipeline, 'valid': valid_pipeline}
# Train/test split by patient id: every id not held out for the test set
# goes to training. (patient_id_G / patient_id_H are assumed to be sets of
# ids built earlier — not visible in this chunk.)
patient_id_G_train = list(patient_id_G.difference(patient_id_G_test))
patient_id_H_train = list(patient_id_H.difference(patient_id_H_test))

# Training-time augmentation: two flavours of Gaussian noise (multiplicative
# and additive), then tensorize, add a channel axis, cast to float, and apply
# photometric + geometric jitter.
transform_pipeline_train = tr.Compose([
    AddGaussian(),
    AddGaussian(ismulti=False),  # additive variant
    tr.ToTensor(),
    tr.AddChannel(axis=0),
    tr.TypeCast('float'),
    # Disabled experiments, kept for reference:
    # Attenuation((-.001, .1)),
    # tr.RangeNormalize(0,1),
    tr.RandomBrightness(-.2, .2),
    tr.RandomGamma(.9, 1.1),
    tr.RandomFlip(),
    tr.RandomAffine(rotation_range=5,
                    translation_range=0.2,
                    zoom_range=(0.9, 1.1)),
])

# Test-time pipeline: deterministic preprocessing only, no augmentation.
transform_pipeline_test = tr.Compose([
    tr.ToTensor(),
    tr.AddChannel(axis=0),
    tr.TypeCast('float'),
    # tr.RangeNormalize(0, 1)
])

# Build the training dataset over the two patient groups with the
# augmenting pipeline attached.
transformed_images = Beijing_dataset(root_dir,
                                     patient_id_G_train,
                                     patient_id_H_train,
                                     transform=transform_pipeline_train)
from torchsample import transforms
from tqdm import tqdm
from models import Net
from utils import ScalarEncoder, accuracy, AverageMeter, make_dataset, save_model, print_metrics
from logger import Logger
from sklearn.model_selection import KFold

# Load the Statoil/C-CORE iceberg training data: each band is a flat list
# of 5625 values reshaped to a 75x75 image; inc_angle has non-numeric
# placeholders that become NaN via errors="coerce".
data = pd.read_json("data/train.json")
data["band_1"] = data["band_1"].apply(lambda x: np.array(x).reshape(75, 75))
data["band_2"] = data["band_2"].apply(lambda x: np.array(x).reshape(75, 75))
data["inc_angle"] = pd.to_numeric(data["inc_angle"], errors="coerce")

# Augmentation
affine_transforms = transforms.RandomAffine(rotation_range=None,
                                            translation_range=0.1,
                                            zoom_range=(0.95, 1.05))
rand_flip = transforms.RandomFlip(h=True, v=False)
std_normalize = transforms.StdNormalize()
# NOTE(review): affine_transforms is constructed but NOT part of the composed
# pipeline below — confirm whether it is applied elsewhere or was meant to be
# included in this Compose.
my_transforms = transforms.Compose([rand_flip, std_normalize])

# scalar encoder for incident angles
encoder = ScalarEncoder(100, 30, 45)

# 5-fold split: one (train, val) dataset pair per fold, each to be paired
# with its own network/optimizer. Fixed random_state keeps folds reproducible.
kf = KFold(n_splits=5, shuffle=True, random_state=100)
kfold_datasets = []
networks = []
optimizers = []
for train_index, val_index in kf.split(data):
    train_dataset = make_dataset(data.iloc[train_index], encoder, my_transforms)
    val_dataset = make_dataset(data.iloc[val_index], encoder, my_transforms)
    kfold_datasets.append({"train": train_dataset, "val": val_dataset})
    # A new net for each train-validation dataset
#inc_angle_test = np.nan_to_num(test['inc_angle'].values)
target = np.array(data['is_iceberg'].values)

# input, bins=100, min=0, max=0, out=None) → Tensor
# (pasted torch.histc signature; hist / hist_test are assumed to be built in
# an earlier chunk — TODO confirm.)
plt.scatter(np.arange(0, hist.num_bins), hist_test)
# Alternative diagnostic plots, kept disabled:
#plt.scatter(inc_angle_tr, target)
#plt.xlim(0, 48)
#plt.hist(inc_angle_test, bins=100)
#plt.hist(inc_angle_tr, bins=100)
plt.show()

# Free the DataFrame before stacking the full image tensor to keep peak
# memory down.
del data

# Stack the two radar bands into (N, 2, 75, 75) and wrap them in a loader
# that applies a heavy random affine on every fetch (preview of augmentation).
full_img_tr = np.stack([band_1_tr, band_2_tr], axis=1)
my_transforms = transforms.RandomAffine(rotation_range=180,
                                        translation_range=0.2,
                                        shear_range=None,
                                        zoom_range=(0.8, 1.2))
my_transforms = transforms.Compose(my_transforms.transforms)
test_imgs = torch.from_numpy(full_img_tr).float().cuda()
test_dataset = TensorDataset(test_imgs, input_transform=my_transforms)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
print("loader len:", len(test_loader))

# Endless visual inspection loop: re-draws each (randomly re-augmented)
# sample; interrupt manually to stop.
while True:
    #index = np.random.randint(0, len(data), 1)
    index = 0
    for _data in tqdm(test_loader, total=len(test_loader)):
        print(index)
        plot_sample(_data.squeeze_().cpu())