def CalculateRay(self, screen, x, y):
        # https://learnopengl.com/Lighting/Basic-Lighting for shading
        steps = 0
        depth = 0
        max_steps = 1
        x_angle = MapVals(x, 0, self.WIDTH, -self.FOV / 2,
                          self.FOV / 2) * self.ASP  # the x angle of this ray
        y_angle = MapVals(y, 0, self.HEIGHT, -self.FOV / 2,
                          self.FOV / 2)  # the y angle of this ray
        direction = Normalize(np.array([math.sin(x_angle),
                                        math.sin(y_angle),
                                        1]))  # turn the angles into a unit direction vector so SDF distances map directly to march steps
        for i in range(0, self.MARCH_STEPS):
            point = self.camera.transform.xyz + depth * direction  # march along the ray from the camera origin
            dist = self.SceneSDF(point)
            if dist < self.EPSILON:
                # INSIDE SURFACE
                norm = self.SurfaceNormalEstimate(point)

                ambient_rgb = [0.1, 0.1, 0.1]
                diff_rgb = [0, 0, 0]
                spec_rgb = [0, 0, 0]
                sum_rgb = [0, 0, 0]
                light_level = len(self.lights)
                if light_level == 0:
                    light_level = 1
                for light in self.lights:
                    light_dir = Normalize(light.transform.xyz - point)
                    # Diffuse intensity, clamped so faces pointing away from the light stay dark
                    diff_intensity = max(np.dot(light_dir, norm), 0.0)
                    # Specular intensity (Phong, per the learnopengl reference):
                    # reflect the incoming light about the normal and compare it
                    # with the view direction (assumes Reflect follows the GLSL
                    # reflect(incident, normal) convention)
                    spec_power = 32
                    spec_strength = 1
                    view_dir = Normalize(self.camera.transform.xyz - point)
                    reflect_dir = Reflect(-light_dir, norm)
                    spec = math.pow(max(np.dot(view_dir, reflect_dir), 0.0), spec_power)
                    spec_intensity = spec_strength * spec
                    for c in range(3):
                        diff_rgb[c] += diff_intensity * light.colour[c]
                        spec_rgb[c] += spec_intensity * light.colour[c]
                for c in range(3):
                    total = ambient_rgb[c] + diff_rgb[c] + spec_rgb[c]
                    if total < 0:
                        total = 0
                    #sum_rgb[c] = int(MapVals(total, 0, light_level, 0, 255))
                    sum_rgb[c] = int(MapVals(total, 0, 1, 0, 255))
                    if sum_rgb[c] > 255:
                        sum_rgb[c] = 255
                return (sum_rgb[0], sum_rgb[1], sum_rgb[2])
            depth += dist
            steps += 1
            if depth >= self.END:
                if steps > max_steps:
                    max_steps = steps
                # Gone too far
                # Apply edge glow
                if steps >= self.GLOW_THRESHOLD:
                    col = MapVals(steps, self.GLOW_THRESHOLD, max_steps + 1,
                                  self.BACKGROUND_COLOUR[0], 255)
                    return (col, col, col)
                return self.BACKGROUND_COLOUR
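
A few helpers used above (MapVals, Normalize, Reflect) are not shown. A minimal sketch of what they might look like, assuming MapVals is a linear remap, Normalize rescales a vector to unit length, and Reflect follows the GLSL reflect(incident, normal) convention (all assumptions, not the original implementations):

import numpy as np

def MapVals(v, in_min, in_max, out_min, out_max):
    # Linearly remap v from [in_min, in_max] to [out_min, out_max]
    return out_min + (v - in_min) * (out_max - out_min) / (in_max - in_min)

def Normalize(v):
    # Scale the vector to unit length
    return v / np.linalg.norm(v)

def Reflect(incident, normal):
    # GLSL-style reflection about a unit normal
    return incident - 2 * np.dot(incident, normal) * normal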
Example #2
    def normalizeVariable(self):
        # Get selected variable in preview Tab widget
        selected_var_idx = sorted({
            index.column()
            for index in self.tableWidget_signal_preview.selectedIndexes()
        })

        selected_variables = []
        for i in selected_var_idx:
            if i != 0:  # ignore the TimeIndex column
                selected_variable = self.my_signal._signal_data.columns.values[
                    i - 1]

                min_value, min_ok = QtGui.QInputDialog.getInt(
                    self,
                    'Normalize Variable',
                    "Give a minimal value for the normalization of " +
                    selected_variable + " :",
                    value=0)
                if min_ok:
                    max_value, max_ok = QtGui.QInputDialog.getInt(
                        self,
                        'Normalize Variable',
                        "Give a maximal value for the normalization of " +
                        selected_variable + " :",
                        value=min_value + 1)
                    if max_ok:
                        self.my_signal._signal_data[
                            selected_variable] = Normalize.Normalize(
                                pd.DataFrame(self.my_signal.
                                             _signal_data[selected_variable]),
                                min_value, max_value)
                        self.displaySignalPreview()
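
Normalize.Normalize above is presumably a min-max rescaler over a one-column DataFrame. A minimal sketch under that assumption (not the original implementation):

def Normalize(df, min_value, max_value):
    # Rescale each column linearly into [min_value, max_value]
    lo, hi = df.min(), df.max()
    return (df - lo) / (hi - lo) * (max_value - min_value) + min_value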
Example #3
def todfa():
    states_json = json.loads(request.values['states'])  # json.loads (needs `import json`); never eval untrusted request data
    states = {}
    for s in states_json:
        isInitial, isFinal = False, False
        if s.get("isInitial"):
            isInitial = True
        if s.get("isFinal"):
            isFinal = True
        states.update(
            {s['id']: state(s['label'], isFinal=isFinal, isInitial=isInitial)})
    transitions_json = json.loads(request.values['transitions'])
    for t in transitions_json:
        states[t['from']].connect(states[t['to']], t['label'], t['pop'],
                                  t['push'])
    machine = pda()
    for s in states.values():
        machine.add_state(s)
    #return (str(machine).replace("\n","<br>"))
    normal_machine = Normalize(machine)
    grammer = MakeCFG(normal_machine)

    states_json = []
    transitions_json = []
    transition_id = 0
    print(machine)
    for s in normal_machine.states:
        new_state = {"id": s.name, "label": s.name}
        if s.isFinal:
            new_state.update({"isFinal": 1})
        if s.isInitial:
            new_state.update({"isInitial": 1})
        states_json.append(new_state)
        tmp = s.get_neighbours()
        for t in tmp:
            for next_s in tmp[t]:
                new_edge = {
                    "id": str(transition_id),
                    "from": s.name,
                    "to": next_s,
                    "arrows": "to"
                }
                tmp0 = []
                for i in [0, 1, 2]:
                    if t[i] == "":
                        tmp0.append(chr(955))  # chr(955) is 'λ': render the empty symbol as lambda
                    else:
                        tmp0.append(str(t[i]))
                new_edge.update(
                    {"label": tmp0[0] + ", " + tmp0[1] + " --> " + tmp0[2]})
                transitions_json.append(new_edge)
                transition_id += 1
    good_grammer = ""
    for i in grammer:
        good_grammer += "<br>" + str(i) + " ---> " + "|".join(
            ["".join(j) for j in grammer[i]])
    return render_template("show_dfa.html",
                           states_json=states_json,
                           transitions_json=transitions_json,
                           grammer=good_grammer)
Example #4
    def create_vgg_model(style_layers=[0, 7, 14, 27, 40],
                         content_layers=[30],
                         style_weight=[1.0, 1.0, 1.0, 1.0, 1.0],
                         content_weight=[1.0]):
        # copy the layer lists: the mutable defaults are mutated via .remove() below
        style_layers = list(style_layers)
        content_layers = list(content_layers)
        vgg = freeze_until(
            models.vgg19_bn(pretrained=True).to(self.device).features, 99999)

        style_losses = []
        content_losses = []
        calc_loss = False

        model = nn.Sequential(
            Normalize(
                torch.tensor([0.485, 0.456, 0.406]).to(self.device),
                torch.tensor([0.229, 0.224, 0.225]).to(self.device)))
        for i, layer in enumerate(vgg.children()):
            if (isinstance(layer, nn.ReLU)):
                model.add_module(str(i), nn.ReLU(inplace=False))
            else:
                model.add_module(str(i), layer)
            if (i in style_layers):
                style_loss_layer = StyleLoss(model(s_img).detach())
                model.add_module('Style-' + str(i), style_loss_layer)
                style_layers.remove(i)
                style_losses.append(style_loss_layer)
            if (i in content_layers):
                content_loss_layer = ContentLoss(model(c_img).detach())
                model.add_module('Content-' + str(i), content_loss_layer)
                content_layers.remove(i)
                content_losses.append(content_loss_layer)
            if (len(style_layers) == 0 and len(content_layers) == 0):
                break

        return model
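
The Normalize module at the front of the Sequential is presumably a per-channel (x - mean) / std layer, as in the PyTorch style-transfer tutorial. A minimal sketch under that assumption:

import torch
import torch.nn as nn

class Normalize(nn.Module):
    def __init__(self, mean, std):
        super().__init__()
        # reshape to [C, 1, 1] so the statistics broadcast over H and W
        self.register_buffer('mean', mean.view(-1, 1, 1))
        self.register_buffer('std', std.view(-1, 1, 1))

    def forward(self, img):
        return (img - self.mean) / self.std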
Example #5
def main():
	args = get_args()
	root_dir = args.root_dir
	imgs = list(os.walk(root_dir))[0][2]

	save_dir = args.save_dir
	num_classes = 100 # CIFAR100
	model = ResNet.resnet(arch='resnet50', pretrained=False, num_classes=num_classes,
		use_att=args.use_att, att_mode=args.att_mode)
	#model = nn.DataParallel(model)
	#print(model)

	if args.resume:
		if os.path.isfile(args.resume):
			print(f'=> loading checkpoint {args.resume}')
			checkpoint = torch.load(args.resume)
			best_acc5 = checkpoint['best_acc5']
			model.load_state_dict(checkpoint['state_dict'], strict=False)
			print(f"=> loaded checkpoint {args.resume} (epoch {checkpoint['epoch']})")
			print(f'=> best accuracy {best_acc5}')
		else:
			print(f'=> no checkpoint found at {args.resume}')

	model_dict = get_model_dict(model, args.type)
	normalizer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

	for img_name in imgs:
		img_path = os.path.join(root_dir, img_name)
		pil_img = PIL.Image.open(img_path)
	
		torch_img = torch.from_numpy(np.asarray(pil_img))
		torch_img = torch_img.permute(2, 0, 1).unsqueeze(0)
		torch_img = torch_img.float().div(255)
		torch_img = F.interpolate(torch_img, size=(224, 224), mode='bilinear', align_corners=False)

		normalized_torch_img = normalizer(torch_img)

		gradcam = GradCAM(model_dict, True)
		gradcam_pp = GradCAMpp(model_dict, True)

		mask, _ = gradcam(normalized_torch_img)
		heatmap, result = visualize_cam(mask, torch_img)

		mask_pp, _ = gradcam_pp(normalized_torch_img)
		heatmap_pp, result_pp = visualize_cam(mask_pp, torch_img)
		
		images = torch.stack([torch_img.squeeze().cpu(), heatmap, heatmap_pp, result, result_pp], 0)

		images = make_grid(images, nrow=1)

		if args.use_att:
			save_dir = os.path.join(args.save_dir, 'att')
		else:
			save_dir = os.path.join(args.save_dir, 'no_att')

		os.makedirs(save_dir, exist_ok=True)
		output_name = img_name
		output_path = os.path.join(save_dir, output_name)

		save_image(images, output_path)
Example #6
    def SurfaceNormalEstimate(self, pos):
        # Central-difference estimate of the SDF gradient, normalized to give a unit surface normal
        return Normalize(
            np.array([
                self.SceneSDF(np.array([pos[0] + self.EPSILON, pos[1], pos[2]])) -
                self.SceneSDF(np.array([pos[0] - self.EPSILON, pos[1], pos[2]])),
                self.SceneSDF(np.array([pos[0], pos[1] + self.EPSILON, pos[2]])) -
                self.SceneSDF(np.array([pos[0], pos[1] - self.EPSILON, pos[2]])),
                self.SceneSDF(np.array([pos[0], pos[1], pos[2] + self.EPSILON])) -
                self.SceneSDF(np.array([pos[0], pos[1], pos[2] - self.EPSILON]))
            ]))
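
An equivalent, more compact formulation of the same estimate (a sketch, not the original code): the normal is the normalized central-difference gradient of the SDF.

def surface_normal_estimate(sdf, pos, eps):
    offsets = np.eye(3) * eps  # one perturbation vector per axis
    grad = np.array([sdf(pos + o) - sdf(pos - o) for o in offsets])
    return grad / np.linalg.norm(grad)
Example #7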
def generate_saliency_map(img, img_name):
    start = time.time()

    normalizer = Normalize(mean=[0.485, 0.456, 0.406],
                           std=[0.229, 0.224, 0.225])
    torch_img = torch.from_numpy(np.asarray(img)).permute(
        2, 0, 1).unsqueeze(0).float().div(255)
    torch_img = F.interpolate(torch_img,  # F.upsample is deprecated; interpolate is the current API
                              size=(512, 512),
                              mode='bilinear',
                              align_corners=False)
    normed_torch_img = normalizer(torch_img)

    resnet = models.resnet101(pretrained=True)
    resnet.eval()
    cam_dict = dict()
    model_dict = dict(type='resnet',
                      arch=resnet,
                      layer_name='layer4',
                      input_size=(512, 512))
    gradcam = GradCAM(model_dict, True)
    gradcam_pp = GradCAMpp(model_dict, True)

    images = []

    mask, _ = gradcam(normed_torch_img)
    heatmap, result = visualize_cam(mask, torch_img)
    mask_pp, _ = gradcam_pp(normed_torch_img)
    heatmap_pp, result_pp = visualize_cam(mask_pp, torch_img)
    images.append(
        torch.stack([
            torch_img.squeeze().cpu(), heatmap, heatmap_pp, result, result_pp
        ], 0))
    images = make_grid(torch.cat(images, 0), nrow=1)

    # Only going to use result_pp
    output_dir = 'outputs'
    os.makedirs(output_dir, exist_ok=True)
    output_name = img_name
    output_path = os.path.join(output_dir, output_name)
    save_image(result_pp, output_path)

    end = time.time()
    duration = round(end - start, 2)
    return output_path
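
A hypothetical call to the function above (the image path is an assumption):

img = PIL.Image.open('examples/cat.jpg').convert('RGB')
saved_to = generate_saliency_map(img, 'cat_cam.jpg')
print(saved_to)  # e.g. outputs/cat_cam.jpg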
Example #8
invTransform = Denormalize(opt.normalizeMean, opt.normalizeNorm)

# set model

model = createModel(opt)
model.setup(opt)

# set dataloader

if opt.augment:
    transformList = [
        Resize(opt.loadSize),
        RandomCrop(opt.fineSize),
        RandomRotation(opt.rotate),
        ToTensor(),
        Normalize(opt.normalizeMean, opt.normalizeNorm),
        RandomHorizontalFlip(),
    ]
else:
    transformList = [
        Resize(opt.loadSize),
        ToTensor(),
        Normalize(opt.normalizeMean, opt.normalizeNorm)
    ]

transform = Compose(transformList)

datasetA = createDataset([opt.datasetA], transform=transform, outputFile=False)

datasetB = createDataset([opt.datasetB], transform=transform, outputFile=False)
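
Denormalize is presumably the inverse of the Normalize transform, used to undo the mean/std scaling before images are displayed or saved. A minimal sketch under that assumption:

import torch

class Denormalize:
    def __init__(self, mean, std):
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def __call__(self, tensor):
        # invert (x - mean) / std
        return tensor * self.std + self.mean
Example #9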
    def setup(self):
        pkl_dir = self.config.split_dir
        with open(os.path.join(pkl_dir, "splits.pkl"), 'rb') as f:
            splits = pickle.load(f)

        tr_keys = splits[self.config.fold]['train']
        val_keys = splits[self.config.fold]['val']
        test_keys = splits[self.config.fold]['test']
        print("pkl_dir: ", pkl_dir)
        print("tr_keys: ", tr_keys)
        print("val_keys: ", val_keys)
        print("test_keys: ", test_keys)
        self.device = torch.device(
            self.config.device if torch.cuda.is_available() else "cpu")
        task = self.config.dataset_name
        self.train_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="train",
                           keys=tr_keys,
                           taskname=task),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.val_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="val",
                           keys=val_keys,
                           taskname=self.config.dataset_name),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.test_data_loader = torch.utils.data.DataLoader(
            NucleusDataset(self.config.data_root_dir,
                           train=True,
                           transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           target_transform=transforms.Compose([
                               Normalize(),
                               Rescale(self.config.patch_size),
                               ToTensor()
                           ]),
                           mode="test",
                           keys=test_keys,
                           taskname=self.config.dataset_name),
            batch_size=self.config.batch_size,
            shuffle=True)

        self.model = UNet(num_classes=self.config.num_classes,
                          in_channels=self.config.in_channels)
        #self.model = UNet()
        self.model.to(self.device)
        self.bce_weight = 0.5
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.config.learning_rate)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min')

        # If directory for checkpoint is provided, we load it.
        if self.config.do_load_checkpoint:
            if self.config.checkpoint_dir == '':
                print(
                    'checkpoint_dir is empty, please provide directory to load checkpoint.'
                )
            else:
                self.load_checkpoint(name=self.config.checkpoint_dir,
                                     save_types=("model",))  # trailing comma: a 1-tuple, not a plain string

        self.save_checkpoint(name="checkpoint_start")
        self.elog.print('Experiment set up.')
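
The no-argument Normalize() used in these pipelines is presumably a per-sample rescaling transform. One plausible sketch (an assumption, not the project's actual transform, which may operate on dict samples instead):

class Normalize:
    def __call__(self, image):
        # z-score the image; the epsilon guards against constant inputs
        return (image - image.mean()) / (image.std() + 1e-8)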
Example #10
import os
import PIL.Image
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.models as models
from torchvision.utils import make_grid, save_image
from collections import OrderedDict
from utils import visualize_cam, Normalize
from gradcam import GradCAM, GradCAMpp


img_dir = 'examples'
# img_name = 'collies.JPG'
# img_name = 'multiple_dogs.jpg'
# img_name = 'snake.JPEG'
img_name = "real_336_000034.jpg"
img_path = os.path.join(img_dir, img_name)
pil_img = PIL.Image.open(img_path)

normalizer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
torch_img = torch.from_numpy(np.asarray(pil_img)).permute(2, 0, 1).unsqueeze(0).float().div(255).cuda()
torch_img = F.interpolate(torch_img, size=(224, 224), mode='bilinear', align_corners=False)  # F.upsample is deprecated
normed_torch_img = normalizer(torch_img)

###
# resnet = models.resnet101(pretrained=True)
resnet = models.resnet101()  # remember to pass any required constructor arguments
resnet101_dict = resnet.state_dict()

state_dict = torch.load('./model/2real/extractor_8.pth')  # load the pretrained net-A .pth file

# keep only the keys net-B needs, then merge them into the resnet state dict
new_state_dict = {k: v for k, v in state_dict.items() if k in resnet101_dict}
resnet101_dict.update(new_state_dict)  # update the parameters
resnet.load_state_dict(resnet101_dict)  # load the merged weights back into the model
Example #11
        noisify3 = Noisify(noise_source3,
                           snr_high=config['muse_noise_srn_high'],
                           snr_low=config['muse_noise_srn_low'],
                           random=True)
        rir_files = open(config['rir_path']).read().split('\n')[:-1]
        random_rir_reader = RIRSource(rir_files,
                                      random=True,
                                      sample_rate=16000)
        reverb = Reverberate(rir_source=random_rir_reader)
        muse_augment = RandomChoice([noisify1, noisify2, noisify3])
        wav_augments = RandomApply([muse_augment, reverb], 0.25)
        transforms += [wav_augments]
    melspectrogram = LogMelSpectrogram(**config['fbank'])
    transforms += [melspectrogram]
    if config['normalize']:
        transforms += [Normalize(config['mean_std_file'])]

    if config['augment_mel']:
        #define spectrogram masking
        time_masking = RandomMasking(
            max_mask_count=config['max_time_mask'],
            max_mask_width=config['max_time_mask_width'],
            axis=-1)
        freq_masking = RandomMasking(
            max_mask_count=config['max_freq_mask'],
            max_mask_width=config['max_freq_mask_width'],
            axis=-2)

        mel_augments = RandomApply([freq_masking, time_masking], p=0.25)
        transforms += [mel_augments]
    transforms = Compose(transforms)
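
Normalize(config['mean_std_file']) is presumably cepstral mean/variance normalization loaded from precomputed statistics. A sketch assuming an .npz file with 'mean' and 'std' arrays (both the format and the keys are assumptions):

import numpy as np

class Normalize:
    def __init__(self, mean_std_file):
        stats = np.load(mean_std_file)
        self.mean, self.std = stats['mean'], stats['std']

    def __call__(self, feat):
        # per-bin normalization of the log-mel features
        return (feat - self.mean) / (self.std + 1e-8)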
Example #12
    cfg['scale'] = 0.5
    # Original, mean 0.4679, std 0.2699
    # Gamma correction: mean 0.3977, std 0.2307
    if cfg['scale'] == 0.5:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]
    else:
        # note: currently identical to the branch above
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]

    train_set = OpenEDS(root_path=root_path + 'train',
                        transform=transforms.Compose([
                            Rescale(cfg['scale']),
                            Brightness(brightness=(0.5, 2.75)),
                            ToTensor(),
                            Normalize(mnet_v2_mean, mnet_v2_std)
                        ]))

    val_set = OpenEDS(root_path=root_path + 'validation',
                      transform=transforms.Compose([
                          Rescale(cfg['scale']),
                          ToTensor(),
                          Normalize(mnet_v2_mean, mnet_v2_std)
                      ]))  #

    test_set = OpenEDS(root_path=root_path + 'test',
                       transform=transforms.Compose([
                           Rescale(cfg['scale']),
                           ToTensor(),
                           Normalize(mnet_v2_mean, mnet_v2_std)
                       ]))  #
Example #13
    cal_mean_std_iter = DataLoader(PersianAlphabetDataset(
        csv_files=['dataset/train_x.csv', 'dataset/train_y.csv']),
                                   batch_size=args.batch_size)
    mean, std = CalMeanStd0(
        cal_mean_std_iter)  # you have to pass a dataloader object

    # 					   ------------------
    # --------------------- building dataset
    # 					   ------------------
    #
    print(f"\t✅ building dataset pipeline from CSV files\n")
    # normalize image using calculated mean and std per channel
    # generally mean and std is a list of per channel values
    # in our case one value for std and mean cause we have one channel
    # --------------------------------------------------------------------
    transform = transforms.Compose([ToTensor(), Normalize(mean=mean, std=std)])
    training_transformed = PersianAlphabetDataset(
        csv_files=['dataset/train_x.csv', 'dataset/train_y.csv'],
        transform=transform)
    valid_transformed = PersianAlphabetDataset(
        csv_files=['dataset/test_x.csv', 'dataset/test_y.csv'],
        transform=transform)

    # 						---------------------------
    # --------------------- building dataloader objects
    # 						---------------------------
    #
    print(
        f"\t✅ building dataloader objects from training and valid data pipelines\n"
    )
    # -----------------------------------------------------
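
CalMeanStd0 is only referenced above, not shown. A plausible sketch: stream once over the DataLoader and accumulate per-channel statistics (the (image, label) batch layout is an assumption):

def CalMeanStd0(loader):
    count, total, total_sq = 0, 0.0, 0.0
    for images, _ in loader:  # assumes (image, label) batches
        x = images.float()
        count += x.numel()
        total += x.sum().item()
        total_sq += (x ** 2).sum().item()
    mean = total / count
    std = (total_sq / count - mean ** 2) ** 0.5
    return [mean], [std]  # one entry per channel; a single channel here
Example #14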
def test():
    test_path = '/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/test'
    for patient in tqdm(os.listdir(test_path)): 
        count = 0
        area = 0
        
        # keep the first two dot-separated parts, e.g. "Patient_01.nii.gz" -> "Patient_01.nii"
        x = patient.split(".")
        filename = x[0] + '.' + x[1]

        print("patient = ", patient)
        test_set = SegThorDataset(test_path,
                                  patient=patient, 
                                  phase='test',
                                  transform=transforms.Compose([
                                         Rescale(1.0, labeled=False),
                                         Normalize(labeled=False),
                                         ToTensor(labeled=False)
                                  ]))

        test_loader = torch.utils.data.DataLoader(dataset=test_set, 
                                                  batch_size=1, 
                                                  shuffle=False)


        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        model = torch.load("models/model.pt")
        model.eval()

        '''
        with torch.no_grad():
            for batch_idx, sample in enumerate(test_loader):     
                images = sample['image'].to(device, dtype=torch.float)        
                outputs = model(images)

                print("tensor: {} and {} ".format(images.size(), outputs.size()))
                images = tensor_to_numpy(images)            
                max_idx = torch.argmax(outputs, 1, keepdim=True)
                max_idx = tensor_to_numpy(max_idx)
                print("numpy: {} and {} ".format(images.shape, max_idx.shape))

                fig=plt.figure()
                fig.add_subplot(1,2,1)
                plt.imshow(max_idx)
                fig.add_subplot(1,2,2)
                plt.imshow(images)
                plt.show()
#                fig.close()
                count = count + 1
                if count==150:
                    break
        '''
#        '''
        seg_vol_2d = zeros([len(test_set),  512, 512])
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        model = torch.load("models/model.pt")
        model.eval()
        model.to(device)
        
        with torch.no_grad():
            for batch_idx, sample in enumerate(test_loader):     
                images = sample['image'].to(device, dtype=torch.float)        
                outputs = model(images)

                images = tensor_to_numpy(images)            
                max_idx = torch.argmax(outputs, 1, keepdim=True)
                max_idx = tensor_to_numpy(max_idx)
                          
              #  for k in range(outputs.size(0)): 
              #  print(max_idx.shape)
                slice_v = max_idx[:,:]   
                slice_v = slice_v.astype(float32)
                slice_v = ndimage.interpolation.zoom(slice_v, zoom=1, order=0, mode='nearest', prefilter=True)
                seg_vol_2d[count,:,:] = slice_v
                count = count + 1
               
            segmentation = sitk.GetImageFromArray(seg_vol_2d, isVector=False)
            print(segmentation.GetSize())
            sitk.WriteImage(sitk.Cast( segmentation, sitk.sitkUInt8 ), filename, True) 
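
tensor_to_numpy is used throughout this example but not shown; a plausible one-line sketch:

def tensor_to_numpy(t):
    # detach from the graph, move to CPU, drop singleton dims
    return t.detach().cpu().squeeze().numpy()
Example #15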
    # Test RGB-D data loader, transforms and utilities

    data_dir = 'sunrgbd/256'
    data_dir = 'sunrgbd/256_lite'  # overrides the line above: use the lite subset

    #    data_transforms = Compose([Resize(224),
    #                                   RandomHorizontalFlip(),
    #                                   ToTensor(),
    #                                   Normalize(MEAN_RGB, STD_RGB, MEAN_DEPTH, STD_DEPTH))])
    #    data_transforms = Compose([RandomResizedCrop(224),
    #                                   ToTensor(),
    #                                   Normalize(MEAN_RGB, STD_RGB, MEAN_DEPTH, STD_DEPTH)])
    data_transforms = Compose([
        CenterCrop(224),
        ToTensor(),
        Normalize(MEAN_RGB, STD_RGB, MEAN_DEPTH, STD_DEPTH)
    ])

    rgbd_dataset = ImageFolder(os.path.join(data_dir, 'train'),
                               data_transforms)
    data_loader = DataLoader(rgbd_dataset,
                             batch_size=4,
                             shuffle=True,
                             num_workers=4)
    class_names = rgbd_dataset.classes

    print(class_names)

    rgbd_iter = iter(data_loader)

    # Get a batch of training data
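    # (sketch) assuming the default (images, labels) ImageFolder batch layout:
    inputs, labels = next(rgbd_iter)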
Example #16
from tqdm import tqdm

from utils import load_data, Dx_cross_entropy, Normalize, chunks
from run_fat import attack_step
import numpy as np
import time

Dx_losses = {"logistic_regression": 123, "cross_entropy": Dx_cross_entropy}
losses = {
    "logistic_regression": 123,
    "cross_entropy": lambda x, y: torch.nn.functional.cross_entropy(x, y)
}

# todo: manage the gpu id
# todo: BUG when n_data mod n_workers is non-zero
norm = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
# epsilon = 8./255


def sgd_step(f, data, label, sgd_n_steps, sgd_step_size, sgd_mb_size):
    opt = optim.SGD(f.parameters(), lr=sgd_step_size)
    for i in range(sgd_n_steps):
        opt.zero_grad()
        index = range(i * sgd_mb_size, (i + 1) * sgd_mb_size)
        loss_f = loss(f(data[index]), label[index])  # `loss` is selected from `losses` elsewhere in the original script
        loss_f.backward()
        opt.step()


# def attack_step(model, data, label, epsilon, attack_lr=5e-3, mb_size=128):
#     delta = torch.zeros_like(data, requires_grad=True)
Example #17
        f.write('\n')

    cfg = dict()
    cfg['batch_size'] = 64

    cfg['scale'] = 0.5
    if cfg['scale'] == 0.5:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]
    else:
        mnet_v2_mean = [0.4679]
        mnet_v2_std = [0.2699]

    train_set = OpenEDS(root_path=root_path + 'train',
                        transform=transforms.Compose(
                            [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)]))  #
    val_set = OpenEDS(root_path=root_path + 'validation',
                      transform=transforms.Compose(
                          [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)]))  #

    test_set = OpenEDS(root_path=root_path + 'test',
                       transform=transforms.Compose(
                           [Rescale(cfg['scale']), ToTensor(), Normalize(mnet_v2_mean, mnet_v2_std)]))  #

    loaders = {'train': torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True,
                                                    num_workers=args.num_workers),
               'val': torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False,
                                                  num_workers=args.num_workers, pin_memory=False),
               'test': torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False,
                                                   num_workers=args.num_workers, pin_memory=False)
               }
Example #18
    trans = T.Compose(
        [T.Resize((256, 256)),
         T.CenterCrop((224, 224)),
         T.ToTensor()])
    dataset = SelectedImagenet(
        imagenet_val_dir='data/imagenet/ILSVRC2012_img_val',
        selected_images_csv='data/imagenet/selected_imagenet.csv',
        transform=trans)
    ori_loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=8,
                                             pin_memory=False)
    model = MODEL.resnet.resnet50(
        state_dict_dir='attack/imagenet/models/ckpt/resnet50-19c8e357.pth')
    model.eval()
    model = nn.Sequential(Normalize(), model)
    model.to(device)

    if target_attack:
        label_switch = torch.tensor(
            list(range(500, 1000)) + list(range(0, 500))).long()
    label_ls = []
    for ind, (ori_img, label) in enumerate(ori_loader):
        label_ls.append(label)
        if target_attack:
            label = label_switch[label]

        ori_img = ori_img.to(device)
        img = ori_img.clone()
        m = 0
        for i in range(niters):
Example #19
import os
import scipy
import math
from torch_geometric.data import Data
from torch_geometric.datasets import Planetoid, Amazon
from utils import Normalize
import torch_geometric.transforms as T
#torch.cuda.set_device(1)
path = os.path.expanduser("./data/")
label_rate = {
    'Cora': 0.052,
    'Citeseer': 0.036,
    'Pubmed': 0.003,
    'Computers': 0.015,
    'Photo': 0.021
}

Norm = Normalize()


def get_dataset(name):
    if name in ['Cora', 'Citeseer', 'Pubmed']:
        dataset = Planetoid(path + name, name)
    elif name in ['Computers', 'Photo']:
        dataset = Amazon(path + name, name)
    else:
        raise Exception('Unknown dataset.')
    return dataset
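
A quick usage sketch for get_dataset and the shared Norm transform (Normalize's call signature is an assumption):

cora = get_dataset('Cora')
data = Norm(cora[0])  # apply the shared Normalize transform to the first graph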


def parse_index_file(filename):
    """Parse index file."""
    index = []
    for line in open(filename):
        index.append(int(line.strip()))
    return index
Example #20
        acc = model.evaluate(test_loader=dtest)
        print("Iteration: {}, len(dl): {}, len(du): {},"
              " len(dh) {}, acc: {} ".format(iteration,
                                             len(dl.sampler.indices),
                                             len(du.sampler.indices),
                                             len(hcs_idx), acc))


if __name__ == "__main__":

    dataset_train = Caltech256Dataset(
        root_dir="../caltech256/256_ObjectCategories_train",
        transform=transforms.Compose(
            [SquarifyImage(),
             RandomCrop(224),
             Normalize(),
             ToTensor()]))

    dataset_test = Caltech256Dataset(
        root_dir="../caltech256/256_ObjectCategories_test",
        transform=transforms.Compose(
            [SquarifyImage(),
             RandomCrop(224),
             Normalize(),
             ToTensor()]))

    # Creating data indices for training and validation splits:
    random_seed = 123
    validation_split = 0.1  # 10%
    shuffling_dataset = True
    batch_size = 16
Example #21
def train(epochs, batch_size, learning_rate):

    torch.manual_seed(1234)

    train_loader = torch.utils.data.DataLoader(SegThorDataset(
        "/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data/train",
        phase='train',
        transform=transforms.Compose([Rescale(1.0),
                                      Normalize(),
                                      ToTensor()])),
                                               batch_size=batch_size,
                                               shuffle=True)
    '''
    # Loading validation data
    val_set = SegThorDataset("/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_val", phase='val',
                                   transform=transforms.Compose([
                                       Rescale(0.5),
                                       Normalize(),
                                       ToTensor2()
                                   ]))

    val_loader = torch.utils.data.DataLoader(dataset=val_set,
                                             batch_size=1,
                                             shuffle=False)
    '''

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)
    model.apply(weight_init)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)    #learning rate to 0.001 for initial stage
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=0.00001)
    #optimizer = adabound.AdaBound(params = model.parameters(), lr = 0.001, final_lr = 0.1)

    for epoch in range(epochs):
        f = open('train_output.log', 'a')
        f.write('Epoch {}/{}\n'.format(epoch + 1, epochs))
        f.write('-' * 10)

        running_loss = 0.0
        running_loss_label = np.zeros(5)
        for batch_idx, sample in enumerate(train_loader):
            train_data, labels = sample['image'].to(
                device,
                dtype=torch.float), sample['label'].to(device,
                                                       dtype=torch.uint8)

            optimizer.zero_grad()
            output = model(train_data)

            loss_label, loss = dice_loss2(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            for i in range(5):
                running_loss_label[i] += loss_label[i]

        epoch_loss = running_loss / len(train_loader)
        writer.add_scalar('Train/Loss', epoch_loss, epoch)
        f.write("\n Total Dice Loss: {:.4f}\n".format(epoch_loss))
        epoch_loss_class = np.true_divide(running_loss_label,
                                          len(train_loader))
        f.write(
            "Dice per class: Background = {:.4f} Esophagus = {:.4f}  Heart = {:.4f}  Trachea = {:.4f}  Aorta = {:.4f}\n"
            .format(epoch_loss_class[0], epoch_loss_class[1],
                    epoch_loss_class[2], epoch_loss_class[3],
                    epoch_loss_class[4]))
        #f.write("Dice per class: Background = {:.4f} Esophagus = {:.4f}\n".format(epoch_loss_class[0], epoch_loss_class[1]))
        f.close()

        if epoch % 4 == 0:
            os.makedirs("models", exist_ok=True)
            torch.save(model, "models/model.pt")

    # export scalar data to JSON for external processing
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
    os.makedirs("models", exist_ok=True)
    torch.save(model, "models/model.pt")
Example #22
# set model

model = createModel(opt)  # create a new model
model.setup(opt)  # set model

# set dataloader

if opt.augment:
    print("with data augmentation")
    transformList = [
        RandomRotation(10),
        RandomResizedCrop(),
        Resize(opt.loadSize),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
        RandomHorizontalFlip(),
    ]
else:
    print("without data augmentation")
    transformList = [
        Resize(opt.loadSize),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225])
    ]

transform = Compose(transformList)

supervisedADataset = createDataset([opt.supervisedADataset],
                                   transform=transform,
                                   outputFile=False)[0]
Example #23
    for i in range(len(segthor_dataset)):
        sample = segthor_dataset[i]
        
    #    print(i, sample['image'].size())
        plt.imshow(sample['image'])
        plt.show()
        if i == 50:
            break

    '''
    #    '''
    ## Loading data for training phase
    segthor_dataset = SegThorDataset(
        datapath="/home/WIN-UNI-DUE/smnemada/Master_Thesis/SegThor/data_sub/train",
        phase='train',
        transform=transforms.Compose([
            Rescale(1.0, labeled=True),
            Normalize(labeled=True),
            JointTransform2D(crop=(288, 288), p_flip=0.5),
            ToTensor(labeled=True)
        ]))

    for i in range(len(segthor_dataset)):
        sample = segthor_dataset[i]

        print(i, sample['image'].size(), sample['label'].size())
        if i == 5:
            break
#    '''