def get_transform(train):
    # Build the transform pipeline: training uses a larger rescale plus a
    # random crop for augmentation, evaluation uses a deterministic rescale.
    transforms = []
    if train:
        transforms.append(Rescale(256))
        transforms.append(RandomCrop(224))
    else:
        transforms.append(Rescale(224))
    transforms.append(ToTensor())
    return Compose(transforms)
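# A minimal usage sketch (Rescale, RandomCrop, ToTensor, and Compose are the
# project transforms used above; the helper name below is hypothetical):
def _demo_get_transform():
    train_tf = get_transform(train=True)   # Rescale(256) -> RandomCrop(224) -> ToTensor()
    eval_tf = get_transform(train=False)   # Rescale(224) -> ToTensor()
    return train_tf, eval_tf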
import torch

# Compose, ToTensor, RandomScale, RandomCrop, RandomHorizontalFlip, Resize,
# ZeroPad, Normalize, LabelMap, StandardSegmentationDataset, base_voc,
# base_city, and label_id_map_city are project-local.


def init(batch_size_labeled, batch_size_pseudo, state, split, input_sizes, sets_id, std, mean,
         keep_scale, reverse_channels, data_set, valtiny, no_aug):
    # Return data_loaders/data_loader depending on the state:
    # 0: pseudo labeling
    # 1: semi-supervised training
    # 2: fully-supervised training
    # 3: just testing

    # For labeled set divisions: strip the labeled/unlabeled suffix to get
    # the matching unlabeled split name.
    split_u = split.replace('-r', '')
    split_u = split_u.replace('-l', '')

    # Transformations (compatible with unlabeled data/pseudo-labeled data).
    # ! Can't use torchvision.transforms.Compose
    if data_set == 'voc':
        base = base_voc
        workers = 4
        transform_train = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
            RandomScale(min_scale=0.5, max_scale=1.5),
            RandomCrop(size=input_sizes[0]),
            RandomHorizontalFlip(flip_prob=0.5),
            Normalize(mean=mean, std=std)])
        if no_aug:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
                Normalize(mean=mean, std=std)])
        else:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
                RandomScale(min_scale=0.5, max_scale=1.5),
                RandomCrop(size=input_sizes[0]),
                RandomHorizontalFlip(flip_prob=0.5),
                Normalize(mean=mean, std=std)])
        transform_test = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            ZeroPad(size=input_sizes[2]),
            Normalize(mean=mean, std=std)])
    elif data_set == 'city':  # All the same size (whole set is down-sampled by 2)
        base = base_city
        workers = 8
        transform_train = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
            Resize(size_image=input_sizes[2], size_label=input_sizes[2]),
            RandomScale(min_scale=0.5, max_scale=1.5),
            RandomCrop(size=input_sizes[0]),
            RandomHorizontalFlip(flip_prob=0.5),
            Normalize(mean=mean, std=std),
            LabelMap(label_id_map_city)])
        if no_aug:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
                Normalize(mean=mean, std=std)])
        else:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
                # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
                Resize(size_image=input_sizes[2], size_label=input_sizes[2]),
                RandomScale(min_scale=0.5, max_scale=1.5),
                RandomCrop(size=input_sizes[0]),
                RandomHorizontalFlip(flip_prob=0.5),
                Normalize(mean=mean, std=std)])
        transform_test = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            Resize(size_image=input_sizes[2], size_label=input_sizes[2]),
            Normalize(mean=mean, std=std),
            LabelMap(label_id_map_city)])
    else:
        # Fail fast instead of silently continuing with undefined transforms.
        raise ValueError('Unsupported data_set: ' + str(data_set))

    # Not the actual test set (i.e. validation set)
    test_set = StandardSegmentationDataset(
        root=base, image_set='valtiny' if valtiny else 'val',
        transforms=transform_test, label_state=0, data_set=data_set)
    val_loader = torch.utils.data.DataLoader(
        dataset=test_set, batch_size=batch_size_labeled + batch_size_pseudo,
        num_workers=workers, shuffle=False)

    if state == 3:  # Testing
        return val_loader
    elif state == 2:  # Fully-supervised training
        labeled_set = StandardSegmentationDataset(
            root=base, image_set=(str(split) + '_labeled_' + str(sets_id)),
            transforms=transform_train, label_state=0, data_set=data_set)
        labeled_loader = torch.utils.data.DataLoader(
            dataset=labeled_set, batch_size=batch_size_labeled,
            num_workers=workers, shuffle=True)
        return labeled_loader, val_loader
    elif state == 1:  # Semi-supervised training
        pseudo_labeled_set = StandardSegmentationDataset(
            root=base, data_set=data_set, mask_type='.npy',
            image_set=(str(split_u) + '_unlabeled_' + str(sets_id)),
            transforms=transform_train_pseudo, label_state=1)
        labeled_set = StandardSegmentationDataset(
            root=base, data_set=data_set,
            image_set=(str(split) + '_labeled_' + str(sets_id)),
            transforms=transform_train, label_state=0)
        pseudo_labeled_loader = torch.utils.data.DataLoader(
            dataset=pseudo_labeled_set, batch_size=batch_size_pseudo,
            num_workers=workers, shuffle=True)
        labeled_loader = torch.utils.data.DataLoader(
            dataset=labeled_set, batch_size=batch_size_labeled,
            num_workers=workers, shuffle=True)
        return labeled_loader, pseudo_labeled_loader, val_loader
    else:  # Labeling (state 0): run the unlabeled split through test transforms
        unlabeled_set = StandardSegmentationDataset(
            root=base, data_set=data_set, mask_type='.npy',
            image_set=(str(split_u) + '_unlabeled_' + str(sets_id)),
            transforms=transform_test, label_state=2)
        unlabeled_loader = torch.utils.data.DataLoader(
            dataset=unlabeled_set, batch_size=batch_size_labeled,
            num_workers=workers, shuffle=False)
        return unlabeled_loader
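# A minimal usage sketch for init(). The split name, sizes, and statistics
# below are illustrative placeholders, not the repo's exact configuration.
def _demo_init():
    mean = [0.485, 0.456, 0.406]  # ImageNet statistics, a common default
    std = [0.229, 0.224, 0.225]
    input_sizes = (321, 505, 505)  # (crop size, max resize, test/pad size), illustrative
    labeled_loader, pseudo_loader, val_loader = init(
        batch_size_labeled=8, batch_size_pseudo=8,
        state=1,                   # 1: semi-supervised training -> three loaders
        split='1-8-l',             # hypothetical split name
        input_sizes=input_sizes, sets_id=0, std=std, mean=mean,
        keep_scale=False, reverse_channels=False,
        data_set='voc', valtiny=False, no_aug=False)
    return labeled_loader, pseudo_loader, val_loader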
import os

import torch
from torch.utils.data import DataLoader

from engine import get_detection_model, train_one_epoch, evaluate, train
from utils.dataset import PennFudanDataset
from utils.transforms import Compose, ToTensor
from utils.utils import collate_fn, show_sample

root_path = "../data"
save_path = "../models"
show_example = False
num_epochs = 10
batch_size = 1
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

if __name__ == "__main__":
    # Create the dataset, then split it into train and test subsets with a
    # fixed seed so the split is reproducible.
    dataset = PennFudanDataset(root_path, transforms=Compose([ToTensor()]))
    torch.manual_seed(42)
    indices = torch.randperm(len(dataset)).tolist()
    dataset_train = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset, indices[-50:])

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    train_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
    # No shuffling for evaluation: keep the test order deterministic.
    test_loader = DataLoader(dataset_test, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)

    if show_example:
        img, target = dataset[10]
        show_sample(img, target)
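    # Hedged sketch of the remaining training flow. The signatures of
    # get_detection_model, train_one_epoch, and evaluate are assumptions,
    # modeled on the torchvision detection reference; adjust to this repo's
    # actual engine module.
    model = get_detection_model(num_classes=2)  # background + pedestrian (assumed arg)
    model.to(device)
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
    for epoch in range(num_epochs):
        train_one_epoch(model, optimizer, train_loader, device, epoch, print_freq=10)
        evaluate(model, test_loader, device=device)
    torch.save(model.state_dict(), os.path.join(save_path, "model.pth"))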
import warnings

import matplotlib.pyplot as plt
import numpy as np

# Compose, Resize, ToTensor, and LaneDefense are project-local helpers.


def polyfit2coords_tusimple_with_bounded_classifier(lane_pred, img, classifier,
                                                    classifier_resize_shape=(),
                                                    sample_rate=1, crop_h=0,
                                                    resize_shape=None,
                                                    y_px_gap=20, pts=None, ord=3):
    if resize_shape is None:
        resize_shape = lane_pred.shape
        crop_h = 0
    h, w = lane_pred.shape
    H, W = resize_shape
    coordinates = []
    if pts is None:
        pts = round(H / 2 / y_px_gap)
    transform_x = Compose(Resize(classifier_resize_shape), ToTensor())
    flagged = 0  # count of lanes rejected by the classifier (not returned)
    for i in [idx for idx in np.unique(lane_pred) if idx != 0]:
        ys_pred, xs_pred = np.where(lane_pred == i)

        # Promote numpy's RankWarning (poorly conditioned polynomial fit) to
        # an error so badly behaved lanes can be flagged for inspection.
        debug = False
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                stabilized_lane = LaneDefense.get_stabilized_lane(
                    xs_pred, ys_pred, img, sample_rate=sample_rate, ord=ord)
            except np.RankWarning:
                debug = True
        if debug:
            # Re-run outside the warnings-as-errors context so the fit
            # completes and the lane can be plotted below.
            stabilized_lane = LaneDefense.get_stabilized_lane(
                xs_pred, ys_pred, img, sample_rate=sample_rate, ord=ord)

        if len(stabilized_lane) > 0:
            if debug:
                plt.imshow(stabilized_lane)
                plt.show()
            # Sigmoid over the classifier logit; keep the lane only when the
            # bounded classifier is confident it is a real lane.
            class_logit = classifier(
                transform_x({"img": stabilized_lane})["img"].cuda().unsqueeze(0)
            ).detach().cpu().numpy()
            classification = 1 / (1 + np.exp(-class_logit)) > 0.78
        else:
            classification = True

        if classification:
            # Fit x = f(y), sample it at evenly spaced rows, and map points
            # back to the original (H, W) resolution; -1 marks rows outside
            # the lane's vertical extent.
            poly_params = np.polyfit(ys_pred, xs_pred, deg=ord)
            ys = np.array([h - y_px_gap / (H - crop_h) * h * i for i in range(1, pts + 1)])
            xs = np.polyval(poly_params, ys)
            y_min, y_max = np.min(ys_pred), np.max(ys_pred)
            coordinates.append([
                [int(x / w * W) if 0 <= x < w and y_min <= ys[i] <= y_max else -1,
                 H - y_px_gap * (i + 1)]
                for (x, i) in zip(xs, range(pts))])
        else:
            flagged += 1
    return coordinates
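# A minimal usage sketch. The shape values are illustrative placeholders
# (TuSimple frames are 720x1280); the classifier input size is hypothetical.
def _demo_polyfit2coords(lane_pred, img, classifier):
    # lane_pred: (h, w) integer mask, 0 = background, 1..N = lane instances
    coords = polyfit2coords_tusimple_with_bounded_classifier(
        lane_pred, img, classifier,
        classifier_resize_shape=(224, 224),  # hypothetical classifier input size
        resize_shape=(720, 1280),            # TuSimple frame resolution
        y_px_gap=10, pts=56, ord=2)          # illustrative sampling settings
    return coords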