Example #1
    def process_batch(self, paths):

        helpers.clear_folder('tmp/cam/img/')
        helpers.clear_folder('tmp/cam/overlay/')

        # Images are assumed to come back as floats in [0, 1]; rescale for saving.
        imgs = np.array([self.read_img(x) for x in paths])
        helpers.write_imgs_serially(imgs * 255, base_path='tmp/cam/img/')

        results = self.get_pred_cam(imgs)
        # Each result is (original_img, heatmap, overlay, result); keep the overlays.
        overlays = np.array(
            [overlay for original_img, heatmap, overlay, result in results])
        helpers.write_imgs_serially(overlays, base_path='tmp/cam/overlay/')
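
All of the examples on this page reset an output directory with helpers.clear_folder before writing into it. The helper itself is never shown; a minimal sketch of such a function, assuming it simply empties and recreates the directory (hypothetical, not the original implementation):

    import os
    import shutil

    def clear_folder(path):
        # Delete the directory tree if it exists, then recreate it empty.
        if os.path.isdir(path):
            shutil.rmtree(path)
        os.makedirs(path, exist_ok=True)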
Example #2
    def train(self, num_epochs=1):

        G, D = self.get_models()
        criterion = torch.nn.BCELoss()
        fixed_noise = torch.randn(self.batch_size, self.latent_dims).to(self.device)
        G_opt = torch.optim.Adam(G.parameters(), lr=0.0002, betas=(.5, .999))
        D_opt = torch.optim.Adam(D.parameters(), lr=0.0002, betas=(.5, .999))
        nr_train_batches, train_dataloader = self.get_data('all_train')

        helpers.clear_folder(self.gen_img_dir)
        helpers.clear_folder(self.save_path)

        for epoch in range(num_epochs):
            for i in tqdm(range(nr_train_batches)):
                real_imgs, __ = next(train_dataloader)

                # BCELoss expects float targets, so use 1.0 / 0.0 fills.
                real = torch.full((real_imgs.shape[0], 1), 1.0).to(self.device)
                fake = torch.full((real_imgs.shape[0], 1), 0.0).to(self.device)
                real_imgs = real_imgs.to(self.device)

                # Discriminator update: real batch first, then a detached fake batch.
                D.zero_grad()
                D_real_loss = criterion(D(real_imgs), real)
                D_real_loss.backward()

                z = torch.randn((real_imgs.shape[0], self.latent_dims)).to(self.device)
                gen_imgs = G(z).detach()  # detach so this update does not backprop into G
                D_fake_loss = criterion(D(gen_imgs), fake)
                D_fake_loss.backward()

                D_loss = (D_real_loss + D_fake_loss) / 2.0  # for logging only
                D_opt.step()

                # Generator update: fresh noise; fake images labelled as real.
                G.zero_grad()
                z = torch.randn((real_imgs.shape[0], self.latent_dims)).to(self.device)
                gen_imgs = G(z)
                G_loss = criterion(D(gen_imgs), real)
                G_loss.backward()
                G_opt.step()

                if i % 100 == 0:
                    print('Epoch %d Step %d D_loss %.4f G_loss %.4f' % (epoch, i, D_loss.item(), G_loss.item()))
                    name = str(epoch).zfill(4) + str(i).zfill(4) + '.png'
                    save_image(G(fixed_noise)[:25], self.gen_img_dir+name, nrow=5, normalize=True)

            name = str(epoch).zfill(4)
            torch.save(G.state_dict(), self.save_path+name+'_gen.pth')
            torch.save(D.state_dict(), self.save_path+name+'_disc.pth')
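
A hypothetical way to invoke this trainer; the class name and constructor arguments below are illustrative assumptions based on the attributes the method uses (batch_size, latent_dims, device, gen_img_dir, save_path), not taken from the source:

    trainer = GAN(batch_size=64, latent_dims=100, device='cuda:0')  # hypothetical class/args
    trainer.train(num_epochs=25)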
Example #3
def save_csvs(target_dir, no_valid=False, verbose=0):
	# Write the train/test(/valid) splits as CSV files into target_dir.
	helpers.clear_folder(target_dir)
	if no_valid:
		X_train, y_train = load_orig_data(split='train')
		X_test, y_test = load_orig_data(split='test')
		data = {'train': (X_train, y_train), 'test': (X_test, y_test)}
	else:
		X_train, y_train, X_valid, y_valid = split_train_valid()
		X_test, y_test = load_orig_data(split='test')
		data = {'train': (X_train, y_train), 'test': (X_test, y_test), 'valid': (X_valid, y_valid)}

	for split, (X, y) in data.items():
		X.to_csv('%s%s_X.csv' % (target_dir, split), index=False)
		y.to_csv('%s%s_y.csv' % (target_dir, split), index=False)
		if verbose > 0:
			print(split, X.shape, np.unique(y, return_counts=True))
Example #4
	def __init__(self, samples_per_class, seed, gpu, dataset):

		self.num_classes = 10
		self.latent_dim = 100
		self.batch_size = 500
		self.samples_per_class = samples_per_class
		self.io = data_io.Data_IO(self.samples_per_class, self.batch_size, dataset=dataset, unlab_samples_per_class=5000)
		self.lr = 0.0003
		self.early_stopping_patience = 1000
		self.reduce_lr_patience = 1000

		self.dataset = dataset
		self.name = 'ssl_lab_%s_%d_seed%d' % (dataset, samples_per_class, seed)
		self.best_save_path = 'models/%s/best/' % (self.name)
		self.last_save_path = 'models/%s/last/' % (self.name)
		self.device = 'cuda:%d' % (gpu)
		self.seed = seed  # seed the RNG with the value passed in
		torch.manual_seed(self.seed)
		
		self.log_dir = 'logs/%s/' % (self.name)
		helpers.clear_folder(self.log_dir)
		self.writer = SummaryWriter(self.log_dir)
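
Since the constructor opens a SummaryWriter on logs/<name>/, the scalars logged during training (see Example #5) can be viewed with the standard TensorBoard CLI:

    tensorboard --logdir logs/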
Example #5
	def train(self, num_epochs, resume=False):
		G, D = self.get_model(resume=resume)
		all_train_loader = self.get_dataloader(split='all_train')
		train_loader = self.get_dataloader(split='lab_train')
		lab_train_loader = self.io.create_infinite_dataloader(train_loader)
		valid_loader = self.get_dataloader(split='valid')
		if not resume:
			helpers.clear_folder(self.best_save_path)
			helpers.clear_folder(self.last_save_path)

		XE = nn.CrossEntropyLoss().cuda()

		opt_gen = torch.optim.Adam(G.parameters(), lr=self.lr)
		opt_disc = torch.optim.Adam(D.parameters(), lr=self.lr)
		scheduler_disc = torch.optim.lr_scheduler.ReduceLROnPlateau(
			opt_disc, mode='min', factor=0.5, patience=self.reduce_lr_patience, verbose=True)
		scheduler_gen = torch.optim.lr_scheduler.ReduceLROnPlateau(
			opt_gen, mode='min', factor=0.5, patience=self.reduce_lr_patience, verbose=True)
		best_val_loss = None
		best_val_acc = None
		no_improvement = global_train_step = global_test_step = 0
		fixed_noise = torch.randn(self.batch_size, self.latent_dim)

		for epoch_idx in range(num_epochs):
			
			avg_gen_loss = avg_disc_loss = 0
			G.train(); D.train()
			for unlab_train_x, __ in tqdm(all_train_loader):
				lab_train_x, lab_train_y = next(lab_train_loader)
				
				unl = unlab_train_x.cuda()
				inp = lab_train_x.cuda()
				lbl = lab_train_y.cuda()
				z = torch.randn(self.batch_size, self.latent_dim).cuda()

				# Train Discriminator: supervised cross-entropy on labelled data plus
				# the unlabelled real-vs-fake term of Improved-GAN-style SSL training.
				opt_disc.zero_grad()
				gen_inp = G(z)
				__, logits_lab = D(inp)
				layer_fake, logits_gen = D(gen_inp)
				layer_real, logits_unl = D(unl)
				l_unl = torch.logsumexp(logits_unl, dim=1)
				l_gen = torch.logsumexp(logits_gen, dim=1)
				loss_unl = .5 * torch.mean(F.softplus(l_unl)) - .5 * torch.mean(l_unl) + .5 * torch.mean(F.softplus(l_gen))
				loss_lab = XE(logits_lab, lbl)  # CrossEntropyLoss already averages over the batch
				loss_disc = .5 * loss_lab + .5 * loss_unl
				loss_disc.backward()
				opt_disc.step()
				avg_disc_loss += loss_disc.item()  # .item() avoids keeping the graph alive

				# Train Generator via feature matching: match the mean intermediate
				# features of real and generated batches.
				opt_gen.zero_grad()
				opt_disc.zero_grad()
				gen_inp = G(z)
				layer_fake, __ = D(gen_inp)
				layer_real, __ = D(unl)
				m1 = torch.mean(layer_real, dim=0)
				m2 = torch.mean(layer_fake, dim=0)
				loss_gen = torch.mean((m1 - m2)**2)
				loss_gen.backward()
				opt_gen.step()
				avg_gen_loss += loss_gen.item()

				self.writer.add_scalar('gen_loss', loss_gen, global_train_step)
				self.writer.add_scalar('disc_loss', loss_disc, global_train_step)
				global_train_step += 1

			avg_gen_loss /= len(all_train_loader)
			avg_disc_loss /= len(all_train_loader)

			val_loss = num_correct = total_samples = 0.0
			with torch.no_grad():
				D.eval()
				for x, y in tqdm(valid_loader):
					x = x.cuda()
					y = y.cuda()
					__, logits = D(x)
					loss = XE(logits, y)
					self.writer.add_scalar('val_loss', loss, global_test_step)
					global_test_step += 1
					val_loss += loss.item()
					pred = torch.argmax(logits, dim=1)
					num_correct += torch.sum(pred == y)
					total_samples += len(y)
	
				val_loss /= len(valid_loader)
				acc = num_correct.item() / total_samples

			print('Epoch %d disc_loss %.3f gen_loss %.3f val_loss %.3f acc %.3f' % (epoch_idx, avg_disc_loss, avg_gen_loss, val_loss, acc))
			scheduler_gen.step(val_loss)
			scheduler_disc.step(val_loss)

			if best_val_loss is None:
				best_val_loss = val_loss + 1  # guarantees the first epoch counts as an improvement
			if best_val_acc is None:
				best_val_acc = acc - 1

			no_improvement += 1
			if val_loss < best_val_loss or acc > best_val_acc:
				no_improvement = 0
				if val_loss < best_val_loss:
					best_val_loss = val_loss
					print('Best model updated - Loss reduced to:', best_val_loss)
				elif acc > best_val_acc:
					best_val_acc = acc
					print('Best model updated - Val acc improved to:', acc)

				torch.save(D.state_dict(), self.best_save_path+'disc.pth')
				torch.save(G.state_dict(), self.best_save_path+'gen.pth')

			torch.save(D.state_dict(), self.last_save_path+'disc.pth')
			torch.save(G.state_dict(), self.last_save_path+'gen.pth')

			if no_improvement > self.early_stopping_patience:
				print('Early Stopping')
				break

		self.writer.close()
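
After training, the discriminator doubles as the classifier: the second value it returns is a vector of class logits. A minimal inference sketch, written as if inside the same class, assuming the same model factory and checkpoint layout as above (x_batch is a hypothetical batch of input images):

    __, D = self.get_model(resume=False)
    D.load_state_dict(torch.load(self.best_save_path + 'disc.pth'))
    D.cuda().eval()
    with torch.no_grad():
        __, logits = D(x_batch.cuda())
        preds = torch.argmax(logits, dim=1)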
Example #6
    def create_infinite_dataloader(self, dataloader):
        # Cycle over a finite DataLoader forever, restarting it once exhausted.
        data_iter = iter(dataloader)
        while True:
            try:
                yield next(data_iter)
            except StopIteration:
                data_iter = iter(dataloader)


if __name__ == '__main__':

    dataset_name = 'mnist'
    # dataset_name = 'cifar10'
    io = Data_IO(samples_per_class=100,
                 batch_size=50,
                 dataset=dataset_name,
                 unlab_samples_per_class=5000)

    # Smoke-test dataset and dataloader construction for every split.
    dataset = io.get_dataset(split='train', verbose=1)
    dataset = io.get_dataset(split='valid', verbose=1)
    dataset = io.get_dataset(split='test', verbose=1)

    dataloader = io.get_dataloader(split='lab_train', verbose=1)
    dataloader = io.get_dataloader(split='valid', verbose=1)
    dataloader = io.get_dataloader(split='test', verbose=1)
    dataloader = io.get_dataloader(split='all_train', verbose=1)

    disp_dir = 'tmp/%s/' % (dataset_name)
    helpers.clear_folder(disp_dir)
    for i, (x, y) in enumerate(dataloader):
        io.write_imgs(x, path=disp_dir + str(i).zfill(4) + '.jpg')
        if i < 1:
            print(i, x.shape, y)
Example #7
    def train(self, num_epochs):
        model = self.get_model().cuda()
        train_loader = self.get_dataloader(split='lab_train')
        valid_loader = self.get_dataloader(split='valid')
        helpers.clear_folder(self.best_save_path)
        helpers.clear_folder(self.last_save_path)

        criterion = nn.CrossEntropyLoss().cuda()
        opt = torch.optim.Adam(model.parameters(), lr=self.lr)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            opt,
            mode='min',
            factor=0.1,
            patience=8,
            verbose=True,
            threshold=0.0001,
            threshold_mode='rel',
            cooldown=0,
            min_lr=0,
            eps=1e-08)
        best_val_loss = None  # lowest validation loss seen so far
        no_improvement = 0

        global_train_step = 0
        global_test_step = 0

        for epoch_idx in range(num_epochs):

            train_loss = 0.0
            model.train()
            for x, y in tqdm(train_loader):
                x = x.cuda()
                y = y.cuda()
                opt.zero_grad()
                __, logits = model(x)
                loss = criterion(logits, y)
                self.writer.add_scalar('train_loss', loss, global_train_step)
                global_train_step += 1
                loss.backward()
                opt.step()
                train_loss += loss.item()
            train_loss /= len(train_loader)

            val_loss = num_correct = total_samples = 0.0
            with torch.no_grad():
                model.eval()
                for x, y in tqdm(valid_loader):
                    x = x.cuda()
                    y = y.cuda()
                    __, logits = model(x)
                    loss = criterion(logits, y)
                    self.writer.add_scalar('val_loss', loss, global_test_step)
                    global_test_step += 1
                    val_loss += loss.item()
                    pred = torch.argmax(logits, dim=1)
                    num_correct += torch.sum(pred == y)
                    total_samples += len(y)

                val_loss /= len(valid_loader)
                acc = num_correct.item() / total_samples

            print('Epoch %d train_loss %.3f val_loss %.3f acc %.3f' %
                  (epoch_idx, train_loss, val_loss, acc))
            scheduler.step(val_loss)

            if best_val_loss is None:
                best_val_loss = val_loss + 1  # guarantees the first epoch counts as an improvement

            no_improvement += 1
            if val_loss < best_val_loss:
                no_improvement = 0
                best_val_loss = val_loss
                print('Best model updated - Loss reduced to:', best_val_loss)
                torch.save(model.state_dict(),
                           self.best_save_path + 'disc.pth')

            torch.save(model.state_dict(), self.last_save_path + 'disc.pth')

            if no_improvement > self.early_stopping_patience:
                print('Early Stopping')
                break

        self.writer.close()