Example #1
0
def get_preds(args=None):
    """Run a trained AutoEncoder over a test split and save per-sample losses.

    Loads the checkpoint selected by ``--version``/``--resume_epoch``, feeds
    every sample of ``--set_name`` through the model (batch size must be 1),
    and saves the list of reconstruction errors as a ``.npy`` file under
    ``--saves_dir``.

    Args:
        args: optional list of CLI tokens (defaults to ``sys.argv[1:]``).

    Raises:
        ValueError: if the checkpoint file is missing or batch_size != 1.
    """
    parser = argparse.ArgumentParser(description='Simple testing script.')
    parser.add_argument('--cls_id', help='class id', type=int)
    parser.add_argument('--version', help='model version', type=float)
    parser.add_argument('--resume_epoch',
                        help='trained model for resume',
                        type=int)
    parser.add_argument('--set_name',
                        help='imply attack goal',
                        type=str,
                        default='test_digi_ifgsm_hiding')
    parser.add_argument('--gamma',
                        help='gamma for the SoftL1Loss',
                        type=float,
                        default=9.0)
    parser.add_argument('--checkpoints',
                        help='checkpoints path',
                        type=str,
                        default='voc_checkpoints')
    parser.add_argument('--saves_dir',
                        help='the save path for tested reconstruction error',
                        type=str,
                        default='voc_reconstruction_error')
    parser.add_argument('--batch_size',
                        help='batch size for optimization',
                        type=int,
                        default=1)
    # Parse into a separate name instead of shadowing the ArgumentParser.
    opts = parser.parse_args(args)

    batch_size = opts.batch_size
    # The per-sample loss collection below assumes one sample per batch.
    # A plain `assert` would be stripped under `python -O`, so raise instead.
    if batch_size != 1:
        raise ValueError('batch_size must be 1 for per-sample predictions')
    # makedirs(..., exist_ok=True) avoids the isdir()/mkdir() race and also
    # creates missing parent directories.
    os.makedirs(opts.saves_dir, exist_ok=True)

    cls_name = classes[opts.cls_id]
    opts.checkpoints = '_'.join([opts.checkpoints, cls_name])

    checkpoint_name = os.path.join(
        opts.checkpoints,
        'model_{:1.1f}_epoch{:d}.pt'.format(opts.version,
                                            opts.resume_epoch))
    if not os.path.isfile(checkpoint_name):
        raise ValueError('No checkpoint file {:s}'.format(checkpoint_name))

    print('[data prepare]....')
    cls_dir = "../context_profile/voc_detection_{:s}_p10/"\
     .format(cls_name)
    dataloader_test = DataLoader(Fetch(opts.set_name, root_dir=cls_dir),
                                 batch_size=batch_size,
                                 num_workers=1,
                                 shuffle=False)

    print('[model prepare]....')
    use_gpu = torch.cuda.device_count() > 0
    model = AutoEncoder(opts.gamma)
    if use_gpu:
        model = torch.nn.DataParallel(model).cuda()
    model.load_state_dict(torch.load(checkpoint_name))
    print('model loaded from {:s}'.format(checkpoint_name))

    print('[model testing]...')
    model.eval()
    preds = []
    with torch.no_grad():
        # DataLoader is already iterable; no explicit iter() needed.
        for sample in dataloader_test:
            data = sample['data'].float()
            if use_gpu:
                data = data.cuda()
            loss = model(data)
            preds.append(float(loss))
    preds_name = '_model{:1.1f}_' + opts.set_name
    save_name = os.path.join(opts.saves_dir,
                             cls_name + preds_name.format(opts.version))
    np.save(save_name, preds)
    print('save preds in {:s}'.format(save_name))
Example #2
0
def main(args=None):
    """Train the AutoEncoder on benign context profiles for one VOC class.

    Builds the class-specific checkpoint directory, optionally resumes from
    ``--resume_epoch``, trains for ``--epoches`` epochs with Adam +
    ReduceLROnPlateau, saves a checkpoint after every epoch past the first,
    and dumps the full loss history to ``loss_hist.npy``.

    Args:
        args: optional list of CLI tokens (defaults to ``sys.argv[1:]``).

    Raises:
        ValueError: if a resume checkpoint is requested but missing.
    """
    parser = argparse.ArgumentParser(description='Simple training script.')
    parser.add_argument('--cls_id', help='class id', type=int)
    parser.add_argument('--version', help='model version', type=float)
    parser.add_argument('--gamma', help='gamma for the SoftL1Loss', type=float, default=9.0)
    parser.add_argument('--lr', help='lr for optimization', type=float, default=1e-4)
    parser.add_argument('--epoches', help='num of epoches for optimization', type=int, default=4)
    parser.add_argument('--resume_epoch', help='trained model for resume', type=int, default=0)
    parser.add_argument('--batch_size', help='batch size for optimization', type=int, default=10)
    parser.add_argument('--checkpoints', help='checkpoints path', type=str, default='voc_checkpoints')
    # Parse into a separate name instead of shadowing the ArgumentParser.
    opts = parser.parse_args(args)

    cls_name = classes[opts.cls_id]
    opts.checkpoints = '_'.join([opts.checkpoints, cls_name])
    # makedirs(..., exist_ok=True) avoids the isdir()/mkdir() race.
    os.makedirs(opts.checkpoints, exist_ok=True)
    print('will save checkpoints in ' + opts.checkpoints)
    cls_dir = "../context_profile/voc_detection_{:s}_p10/"\
        .format(cls_name)
    batch_size = opts.batch_size
    print('[data prepare]....')
    dataloader_train = DataLoader(Fetch('train_benign', root_dir=cls_dir),
                                  batch_size=batch_size,
                                  num_workers=2,
                                  shuffle=True)

    print('[model prepare]....')
    use_gpu = torch.cuda.device_count() > 0

    model = AutoEncoder(opts.gamma)
    if use_gpu:
        model = torch.nn.DataParallel(model).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2, verbose=True)
    if opts.resume_epoch > 0:
        checkpoint_name = os.path.join(
            opts.checkpoints,
            'model_{:1.1f}_epoch{:d}.pt'.format(opts.version, opts.resume_epoch))
        if not os.path.isfile(checkpoint_name):
            raise ValueError('No checkpoint file {:s}'.format(checkpoint_name))
        model.load_state_dict(torch.load(checkpoint_name))
        print('model loaded from {:s}'.format(checkpoint_name))

    print('[model training]...')
    loss_hist = []
    epoch_loss = []
    num_iter = len(dataloader_train)
    for epoch_num in range(opts.resume_epoch, opts.epoches):
        model.train()
        # The original wrapped this body in a dead `if True:  # try:`
        # left over from a removed try/except; it is dropped here.
        for iter_num, sample in enumerate(dataloader_train):
            optimizer.zero_grad()
            data = sample['data'].float()
            if use_gpu:
                data = data.cuda()

            loss = model(data).mean()
            if float(loss) == 0:
                # Nothing to backprop; skip this batch.
                continue
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
            optimizer.step()
            epoch_loss.append(float(loss))
            loss_hist.append(float(loss))
            if iter_num % 30 == 0:
                print('Epoch {:d}/{:d} | Iteration: {:d}/{:d} | loss: {:1.5f}'.format(
                    epoch_num + 1, opts.epoches, iter_num + 1, num_iter, float(loss)))
            if iter_num % 3000 == 0:
                # Step the plateau scheduler on the running mean, then reset.
                scheduler.step(np.mean(epoch_loss))
                epoch_loss = []
        if epoch_num < 1:
            # No checkpoint for the very first epoch.
            continue
        checkpoint_name = os.path.join(
            opts.checkpoints,
            'model_{:1.1f}_epoch{:d}.pt'.format(opts.version, epoch_num + 1))
        torch.save(model.state_dict(), checkpoint_name)
        print('Model saved as {:s}'.format(checkpoint_name))

    np.save('loss_hist.npy', loss_hist)
Example #3
0
class BpcvMain:
    """GTK viewer that shows autoencoder reconstructions as they are produced.

    A background Encoder_Thread runs the model and publishes image batches
    into a shared dict guarded by a lock; the GUI redraws on each update.
    """

    def __init__(self):
        imgs_dir = "./imgs_comp_box"
        imgs_mask_dir = "./imgs_mask_box"

        self.str_imgs_fns = []
        self.str_mask_fns = []

        # Collect the immediate subdirectories of the mask root.
        subdirs = []
        for _parent, dirnames, _filenames in os.walk(imgs_mask_dir):
            subdirs.extend(dirnames)

        # For every mask file, record the mask path and the path of the
        # matching composite image (same subdirectory, same .png stem).
        for subdir in subdirs:
            mask_subdir = imgs_mask_dir + "/" + subdir
            for _parent, _dirnames, filenames in os.walk(mask_subdir):
                for fname in filenames:
                    self.str_mask_fns.append(mask_subdir + "/" + fname)
                    stem_end = fname.find(".png")
                    self.str_imgs_fns.append(
                        imgs_dir + "/" + subdir + "/" + fname[:stem_end] + ".png")

        str_pth_fn = "./models/bpcv_encoder_12000.pth"

        self.autoencoder = AutoEncoder()

        # Checkpoint bundles the weights with the loop counters to resume at.
        bpcv_dict = torch.load(str_pth_fn)
        self.autoencoder.load_state_dict(bpcv_dict["net_state"])

        print("continue: ...  n_loop: {0:0>5d}  idx_loop: {1:0>5d}".format(
            bpcv_dict["n_loop"], bpcv_dict["idx_loop"]))
        print(
            ".............................................................................."
        )

        # Main window with a scrolled area hosting the matplotlib canvas.
        self.win = Gtk.Window()
        self.win.connect("delete-event", self.win_quit)
        self.win.set_default_size(1000, 600)
        self.win.set_title("show imgs")

        self.sw = Gtk.ScrolledWindow()
        self.win.add(self.sw)
        self.sw.set_border_width(2)

        figure = Figure(figsize=(8, 8), dpi=80)
        self.canvas = FigureCanvas(figure)
        self.canvas.set_size_request(1000, 600)
        self.sw.add(self.canvas)
        self.win.show_all()

        # Shared state between the GUI and the worker thread.
        self.torch_lock = threading.Lock()
        self.torch_show_data = {}
        self.n_test_imgs = 5
        self.torch_show_data["mess_quit"] = False

        thread_torch = Encoder_Thread(self.update_torch_data,
                                      self.torch_lock,
                                      self.autoencoder,
                                      self.str_imgs_fns,
                                      self.str_mask_fns,
                                      self.torch_show_data,
                                      wh=97,
                                      max_n_loop=3,
                                      n_loop=bpcv_dict["n_loop"],
                                      idx_segment=bpcv_dict["idx_loop"])

        thread_torch.start()

    def update_torch_data(self, str_txt):
        """Rebuild the canvas from the latest batch published by the worker."""
        # Snapshot the shared arrays while holding the lock.
        with self.torch_lock:
            np_imgs = self.torch_show_data["np_imgs"]
            np_mask_imgs = self.torch_show_data["np_mask_imgs"]
            np_decoded = self.torch_show_data["np_decoded"]

        # NCHW -> NHWC for imshow.
        np_imgs = np_imgs.transpose((0, 2, 3, 1))

        self.sw.remove(self.canvas)

        figure = Figure(figsize=(8, 8), dpi=80)

        # 3 rows (input / mask / reconstruction) x n_test_imgs columns.
        axs = [[figure.add_subplot(3, self.n_test_imgs,
                                   row * self.n_test_imgs + col + 1)
                for col in range(self.n_test_imgs)]
               for row in range(3)]

        for col in range(self.n_test_imgs):
            # Inputs are stored in [-1, 1]; rescale to [0, 1] for display.
            axs[0][col].imshow(np_imgs[col] * 0.5 + 0.5, cmap='gray')
            axs[1][col].imshow(np_mask_imgs[col][0], cmap='gray')
            axs[2][col].imshow(np_decoded[col][0], cmap='gray')

        self.canvas = FigureCanvas(figure)
        self.canvas.set_size_request(1000, 600)
        self.sw.add(self.canvas)
        self.sw.show_all()

    def win_quit(self, a, b):
        """Signal the worker thread to stop, then leave the GTK main loop."""
        with self.torch_lock:
            self.torch_show_data["mess_quit"] = True
        Gtk.main_quit()