Code example #1
 def test_init_gpu(self):
     # TODO: More comprehensive test
     pfi_core(
         self.model.cuda(),
         self.batch_size,
         input_shape=[self.channels, self.img_size, self.img_size],
         use_cuda=True,
     )
Code example #2
 def test_init_gpu(self):
     """
     TODO: More comprehensive test
     """
     pfi_core(
         self.model,
         self.BATCH_SIZE,
         input_shape=[self.channels, self.img_size, self.img_size],
         use_cuda=self.USE_GPU,
     )
Code example #3
 def test_init_gpu(self):
     """
     TODO: More comprehensive test
     """
     pfi_core(
         self.model,
         self.img_size,
         self.img_size,
         self.BATCH_SIZE,
         use_cuda=self.USE_GPU,
     )
Code example #4
    def test_fp32_cpu(self):
        # TODO: More comprehensive test

        self.model.to("cpu")
        self.model.eval()

        with torch.no_grad():
            self.output = self.model(self.images)

        pfi_core(
            self.model,
            self.batch_size,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=False,
        )
Code example #5
    def setup_class(self):
        torch.manual_seed(0)

        self.BATCH_SIZE = 1
        self.WORKERS = 1
        self.channels = 3
        self.img_size = 32
        self.USE_GPU = False

        self.model, self.dataset = helper_setUp_CIFAR10_same(
            self.BATCH_SIZE, self.WORKERS)
        self.dataiter = iter(self.dataset)

        self.images, self.labels = next(self.dataiter)

        self.model.eval()
        with torch.no_grad():
            self.output = self.model(self.images)

        self.p = pfi_core(
            self.model,
            self.BATCH_SIZE,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=self.USE_GPU,
        )
Code example #6
    def setup_class(self):
        self.BATCH_SIZE = 4
        self.WORKERS = 1
        self.channels = 3
        self.img_size = 32
        self.LAYER_TYPES = [torch.nn.Conv2d]
        self.USE_GPU = False

        self.model, self.dataset = helper_setUp_CIFAR10_same(
            self.BATCH_SIZE, self.WORKERS
        )

        self.dataiter = iter(self.dataset)
        self.model.eval()

        self.images, self.labels = next(self.dataiter)
        with torch.no_grad():
            self.output = self.model(self.images)

        self.p = pfi_core(
            self.model,
            self.BATCH_SIZE,
            input_shape=[self.channels, self.img_size, self.img_size],
            layer_types=self.LAYER_TYPES,
            use_cuda=self.USE_GPU,
        )
Code example #7
    def setup_class(self):
        torch.manual_seed(0)

        self.BATCH_SIZE = 4
        self.WORKERS = 1
        self.img_size = 32
        self.USE_GPU = True

        self.model, self.dataset = helper_setUp_CIFAR10(self.BATCH_SIZE, self.WORKERS)
        self.dataiter = iter(self.dataset)

        self.model.cuda()

        self.images, self.labels = next(self.dataiter)
        self.images = self.images.cuda()

        self.model.eval()
        with torch.no_grad():
            self.output = self.model(self.images)

        self.p = pfi_core(
            self.model,
            self.img_size,
            self.img_size,
            self.BATCH_SIZE,
            use_cuda=self.USE_GPU,
        )
Code example #8
    def test_fp16_gpu(self):
        # TODO: More comprehensive test

        self.model.to("cuda")
        self.model.half()
        self.model.eval()

        self.images = self.images.cuda()
        with torch.no_grad():
            self.output = self.model(self.images.half())

        pfi_core(
            self.model,
            self.batch_size,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=True,
        )
Code example #9
    def test_fp32_cpu(self):
        """
        TODO: More comprehensive test
        """
        self.USE_GPU = False
        self.model.to("cpu")
        self.model.eval()

        with torch.no_grad():
            self.output = self.model(self.images)

        pfi_core(
            self.model,
            self.BATCH_SIZE,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=self.USE_GPU,
        )
Code example #10
    def test_INT8_cpu(self):
        # TODO: More comprehensive test

        self.model.to("cpu")
        self.model = torch.quantization.quantize_dynamic(self.model,
                                                         {torch.nn.Linear},
                                                         dtype=torch.qint8)
        self.model.eval()

        with torch.no_grad():
            self.output = self.model(self.images)

        pfi_core(
            self.model,
            self.batch_size,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=False,
        )
Code example #11
    def test_orig_model_gpu(self):
        p = pfi_core(
            self.model,
            self.batch_size,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=True,
        )

        if p.get_original_model() is not self.model:
            raise AssertionError
Code example #12
    def test_fp32_gpu(self):
        """
        TODO: More comprehensive test
        """
        self.USE_GPU = True
        self.model.to("cuda")
        self.model.eval()

        self.images = self.images.cuda()
        with torch.no_grad():
            self.output = self.model(self.images)

        pfi_core(
            self.model,
            self.img_size,
            self.img_size,
            self.BATCH_SIZE,
            use_cuda=self.USE_GPU,
        )
Code example #13
    def test_orig_model_gpu(self):
        p = pfi_core(
            self.model,
            self.BATCH_SIZE,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=self.USE_GPU,
        )

        self.faulty_model = p.get_original_model()
        if self.faulty_model is not self.model:
            raise AssertionError
Code example #14
    def test_fp16_gpu(self):
        """
        TODO: More comprehensive test
        """
        self.USE_GPU = True
        self.model.to("cuda")

        self.model.half()
        self.model.eval()

        self.images = self.images.cuda()
        with torch.no_grad():
            self.output = self.model(self.images.half())

        pfi_core(
            self.model,
            self.BATCH_SIZE,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=self.USE_GPU,
        )
Code example #15
    def test_orig_model_gpu(self):
        p = pfi_core(
            self.model,
            self.img_size,
            self.img_size,
            self.BATCH_SIZE,
            use_cuda=self.USE_GPU,
        )

        self.faulty_model = p.get_original_model()
        assert self.faulty_model is self.model
Code example #16
    def test_INT8_cpu(self):
        """
        TODO: More comprehensive test
        """
        self.USE_GPU = False
        self.model.to("cpu")

        self.model = torch.quantization.quantize_dynamic(self.model,
                                                         {torch.nn.Linear},
                                                         dtype=torch.qint8)

        self.model.eval()

        with torch.no_grad():
            self.output = self.model(self.images)

        pfi_core(
            self.model,
            self.img_size,
            self.img_size,
            self.BATCH_SIZE,
            use_cuda=self.USE_GPU,
        )
Code example #17
File: test_neuron_fi.py  Project: pytorchfi/pytorchfi
    def test_neuron_fi_batch_gpu_2(self):
        self.images_gpu = self.images.cuda()
        self.model.cuda()
        self.model.eval()
        with torch.no_grad():
            golden_output = self.model(self.images_gpu)

        batch_i = [0, 2, 3]
        layer_i = [1, 2, 4]
        c_i = [3, 1, 1]
        h_i = [1, 0, 1]
        w_i = [0, 1, 0]

        inj_value_i = [10000.0, 10000.0, 10000.0]

        corrupt_model = pfi_core(
            self.model,
            self.batch_size,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=True,
        ).declare_neuron_fi(
            batch=batch_i,
            layer_num=layer_i,
            dim1=c_i,
            dim2=h_i,
            dim3=w_i,
            value=inj_value_i,
        )

        corrupt_model.eval()
        with torch.no_grad():
            corrupt_output = corrupt_model(self.images_gpu)

        if torch.all(corrupt_output[0].eq(golden_output[0])):
            raise AssertionError
        if not torch.all(corrupt_output[1].eq(golden_output[1])):
            raise AssertionError
        if torch.all(corrupt_output[2].eq(golden_output[2])):
            raise AssertionError
        if torch.all(corrupt_output[3].eq(golden_output[3])):
            raise AssertionError
Code example #18
    def setup_class(self):
        self.BATCH_SIZE = 1
        self.WORKERS = 1
        self.img_size = 32
        self.USE_GPU = False

        self.model, self.dataset = helper_setUp_CIFAR10_same(
            self.BATCH_SIZE, self.WORKERS)

        self.dataiter = iter(self.dataset)
        self.model.eval()

        self.images, self.labels = next(self.dataiter)
        with torch.no_grad():
            self.output = self.model(self.images)

        self.p = pfi_core(
            self.model,
            self.img_size,
            self.img_size,
            self.BATCH_SIZE,
            use_cuda=self.USE_GPU,
        )
Code example #19
File: test_get_funcs.py  Project: pytorchfi/pytorchfi
    def setup_class(self):
        batch_size = 4
        workers = 1
        channels = 3
        img_size = 32
        layer_types = [torch.nn.Conv2d, torch.nn.Linear]
        self.use_gpu = False

        model, dataset = CIFAR10_set_up_custom(batch_size, workers)

        dataiter = iter(dataset)
        model.eval()

        self.images, self.labels = next(dataiter)

        self.p = pfi_core(
            model,
            batch_size,
            input_shape=[channels, img_size, img_size],
            layer_types=layer_types,
            use_cuda=self.use_gpu,
        )
Code example #20
    def setup_class(self):
        torch.manual_seed(0)

        batch_size = 1
        workers = 1
        channels = 3
        img_size = 32
        use_gpu = False

        self.model, self.dataset = CIFAR10_set_up_custom(batch_size, workers)
        dataiter = iter(self.dataset)
        self.images, self.labels = next(dataiter)

        self.model.eval()
        with torch.no_grad():
            self.golden_output = self.model(self.images)

        self.p = pfi_core(
            self.model,
            batch_size,
            input_shape=[channels, img_size, img_size],
            layer_types=[torch.nn.Conv2d, torch.nn.Linear],
            use_cuda=use_gpu,
        )
Code example #21
    def setup_class(self):
        torch.manual_seed(0)

        batch_size = 4
        workers = 1
        channels = 3
        img_size = 32
        use_gpu = False

        model, dataset = CIFAR10_set_up_custom(batch_size, workers)
        dataiter = iter(dataset)

        self.images, self.labels = next(dataiter)

        model.eval()
        with torch.no_grad():
            self.golden_output = model(self.images)

        self.p = pfi_core(
            model,
            batch_size,
            input_shape=[channels, img_size, img_size],
            use_cuda=use_gpu,
        )
Code example #22
import torch
from torchvision import models

from pytorchfi.core import fault_injection as pfi_core

if __name__ == "__main__":
    torch.random.manual_seed(5)
    h = 224
    w = 224
    batch_size = 1
    image = torch.rand((batch_size, 3, h, w))

    softmax = torch.nn.Softmax(dim=1)

    "Models"
    model = models.alexnet(pretrained=True)
    model.eval()

    pfi_model = pfi_core(model, h, w, batch_size)

    "Error-free inference to gather golden value"
    output = model(image)
    golden_softmax = softmax(output)
    golden_label = list(torch.argmax(golden_softmax, dim=1))[0].item()
    print("Error-free label:", golden_label)

    "Single Specified Neuron Injection"
    (b, layer, C, H, W, err_val) = (0, 3, 4, 2, 2, 10000)
    inj = pfi_model.declare_weight_fi(batch=b,
                                      conv_num=layer,
                                      c=C,
                                      h=H,
                                      w=W,
                                      value=err_val)
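
A hedged continuation of example #22 (not part of the original source): assuming the object returned by declare_weight_fi can be run like the original model, as the injected models in examples #23 and #24 are, the faulty prediction can be compared against the golden label computed above. The names inj_output and inj_label are introduced here purely for illustration.

    # Hypothetical continuation inside the same __main__ block:
    # run the injected model and compare its prediction to the golden label.
    with torch.no_grad():
        inj_output = inj(image)
    inj_label = list(torch.argmax(softmax(inj_output), dim=1))[0].item()
    print("Faulty label:", inj_label, "| Error-free label:", golden_label)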
Code example #23
def sample(net, device, dataset, cfg):
    print("Calling sample")
    scale = cfg.scale
    item = dataset[0]
    for step, (hr, lr, name) in enumerate(dataset):
        if "DIV2K" in dataset.name:
            t1 = time.time()
            h, w = lr.size()[1:]
            h_half, w_half = int(h / 2), int(w / 2)
            h_chop, w_chop = h_half + cfg.shave, w_half + cfg.shave
            lr_patch = torch.zeros([4, 3, h_chop, w_chop], dtype=torch.float)
            lr_patch[0].copy_(lr[:, 0:h_chop, 0:w_chop])
            lr_patch[1].copy_(lr[:, 0:h_chop, w - w_chop:w])
            lr_patch[2].copy_(lr[:, h - h_chop:h, 0:w_chop])
            lr_patch[3].copy_(lr[:, h - h_chop:h, w - w_chop:w])
            lr_patch = lr_patch.to(device)

            # Fault injection code
            pfi_model = pfi_core(net, h_chop, w_chop, 1, debug=True)
            inj_model = random_neuron_inj(pfi_model, min_val=-1, max_val=1)
            sr = inj_model(lr_patch).detach()
            # sr = net(lr_patch, cfg.scale).detach()  # original inference without fault injection

            h, h_half, h_chop = h * scale, h_half * scale, h_chop * scale
            w, w_half, w_chop = w * scale, w_half * scale, w_chop * scale

            result = torch.zeros([3, h, w], dtype=torch.float).to(device)
            print("Result shape: {}".format(result.size()))
            print("SR shape: {}".format(sr.size()))
            result[:, 0:h_half, 0:w_half].copy_(sr[0, :, 0:h_half, 0:w_half])
            result[:, 0:h_half, w_half:w].copy_(sr[1, :, 0:h_half,
                                                   w_chop - w + w_half:w_chop])
            result[:, h_half:h,
                   0:w_half].copy_(sr[2, :, h_chop - h + h_half:h_chop,
                                      0:w_half])
            result[:, h_half:h,
                   w_half:w].copy_(sr[3, :, h_chop - h + h_half:h_chop,
                                      w_chop - w + w_half:w_chop])
            sr = result
            t2 = time.time()
        else:
            t1 = time.time()
            lr = lr.unsqueeze(0).to(device)
            sr = net(lr, cfg.scale).detach().squeeze(0)
            lr = lr.squeeze(0)
            t2 = time.time()

        model_name = cfg.ckpt_path.split(".")[0].split("/")[-1]
        sr_dir = os.path.join(cfg.sample_dir, model_name,
                              cfg.test_data_dir.split("/")[-1],
                              "x{}".format(cfg.scale), "SR")
        hr_dir = os.path.join(cfg.sample_dir, model_name,
                              cfg.test_data_dir.split("/")[-1],
                              "x{}".format(cfg.scale), "HR")

        os.makedirs(sr_dir, exist_ok=True)
        os.makedirs(hr_dir, exist_ok=True)

        sr_im_path = os.path.join(sr_dir, "{}".format(name.replace("HR",
                                                                   "SR")))
        hr_im_path = os.path.join(hr_dir, "{}".format(name))

        save_image(sr, sr_im_path)
        save_image(hr, hr_im_path)
        print("Saved {} ({}x{} -> {}x{}, {:.3f}s)".format(
            sr_im_path, lr.shape[1], lr.shape[2], sr.shape[1], sr.shape[2],
            t2 - t1))
Code example #24
File: test_neuron_fi.py  Project: pytorchfi/pytorchfi
    def test_neuron_single_fi_gpu(self):
        self.images_gpu = self.images.cuda()
        self.model.cuda()
        self.model.eval()
        with torch.no_grad():
            golden_output = self.model(self.images_gpu)

        batch_i = [0]
        layer_i = [4]
        c_i = [0]
        h_i = [1]
        w_i = [1]
        inj_value_i = [10000.0]

        p = pfi_core(
            self.model,
            self.batch_size,
            input_shape=[self.channels, self.img_size, self.img_size],
            use_cuda=True,
        )

        corrupt_model_1 = p.declare_neuron_fi(
            batch=batch_i,
            layer_num=layer_i,
            dim1=c_i,
            dim2=h_i,
            dim3=w_i,
            value=inj_value_i,
        )

        corrupt_model_1.eval()
        with torch.no_grad():
            corrupt_output_1 = corrupt_model_1(self.images_gpu)

        if torch.all(corrupt_output_1.eq(golden_output)):
            raise AssertionError

        uncorrupt_model = p.declare_neuron_fi(
            batch=batch_i,
            layer_num=layer_i,
            dim1=c_i,
            dim2=h_i,
            dim3=w_i,
            value=[0],
        )

        uncorrupt_model.eval()
        with torch.no_grad():
            uncorrupted_output = uncorrupt_model(self.images_gpu)

        if not torch.all(uncorrupted_output.eq(golden_output)):
            raise AssertionError

        corrupt_model_2 = p.declare_neuron_fi(
            batch=batch_i,
            layer_num=layer_i,
            dim1=c_i,
            dim2=h_i,
            dim3=w_i,
            value=inj_value_i * 2,
        )

        corrupt_model_2.eval()
        with torch.no_grad():
            corrupted_output_2 = corrupt_model_2(self.images_gpu)

        if torch.all(corrupted_output_2.eq(golden_output)):
            raise AssertionError
        if not torch.all(corrupted_output_2.eq(corrupted_output_2)):
            raise AssertionError