Example #1
def predict(conv=args.conv):
    assert os.path.isdir(
        constants.CHECKPOINT_DIR), 'Error: Model is not available'
    if conv:
        checkpoint = torch.load(
            os.path.join(constants.CHECKPOINT_DIR, 'convergence.t7'))
        model.load_state_dict(checkpoint['model'])
    else:
        checkpoint = torch.load(
            os.path.join(constants.CHECKPOINT_DIR, 'best_acc_model.t7'))
        model.load_state_dict(checkpoint['model'])

    torch.set_grad_enabled(False)
    model.eval()
    test_correct = 0
    test_total = 0
    for batch_id, (images, labels) in enumerate(test_loader):
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)

        _, predicted = outputs.max(1)
        test_total += labels.size(0)
        test_correct += predicted.eq(labels).sum().item()

    print('Accuracy on test data: {}%'.format((test_correct / test_total) * 100))
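
Note that torch.set_grad_enabled(False) above turns autograd off globally for the rest of the process; the scoped equivalent, shown here as a minimal sketch, is the torch.no_grad context manager:

import torch

# Scoped alternative to the global autograd switch used above;
# gradients are re-enabled automatically when the block exits.
model = torch.nn.Linear(4, 2)
with torch.no_grad():
    out = model(torch.randn(1, 4))
print(out.requires_grad)  # False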
Example #2
import torch
import torch.nn as nn

class LabelSmoothing(nn.Module):
    # Header reconstructed from usage below; only the tail of this class
    # survived in the original snippet.
    def __init__(self, smoothing=0.1):
        super(LabelSmoothing, self).__init__()
        self.smoothing = smoothing
        self.confidence = 1.0 - smoothing

    def forward(self, x, target):
        """
        x: network output, shape (M, num_classes)
        target: labels, shape (M,)
        """
        smoothed_label = x.data.clone()
        num_classes = x.size(1)
        smoothed_label.fill_(self.smoothing / (num_classes - 1))  # the "otherwise" case
        # target.data.unsqueeze(1) gives the indices; confidence is the fill value
        smoothed_label.scatter_(1, target.data.unsqueeze(1), self.confidence)

        return smoothed_label

if __name__ == "__main__":
    # Example of label smoothing. 
    import sys
    sys.path.append('/home/gfx/Projects/Tinymind')
    import torchvision.models as models
    from networks.network import *
    from config import config

    backbone = models.resnet18(pretrained=True)
    models = ResNet18(backbone, 100)  # note: this shadows the torchvision.models import above
    # print(models)
    data = torch.randn(8, 1, 128, 128)
    x = models(data)
    print(x)

    labelsmooth = LabelSmoothing(smoothing=0.1)
    # torch.autograd.Variable is deprecated; plain tensors work directly
    smoothed_label = labelsmooth(x, torch.LongTensor([2, 0, 10, 90, 1, 2, 4, 6]))
    print(smoothed_label)
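
The snippet stops at building the smoothed targets and just prints them; how they are consumed is not shown. A common pairing (an assumption here, not taken from the code above) is a KL-divergence loss against log-softmax outputs:

import torch
import torch.nn.functional as F

# Hypothetical consumer of the smoothed labels; pairing them with
# KL-divergence is an assumption, not shown in the snippet above.
logits = torch.randn(8, 100)                      # (M, num_classes)
target = torch.LongTensor([2, 0, 10, 90, 1, 2, 4, 6])

smoothing = 0.1
confidence = 1.0 - smoothing
smoothed = torch.full_like(logits, smoothing / (logits.size(1) - 1))
smoothed.scatter_(1, target.unsqueeze(1), confidence)

loss = F.kl_div(F.log_softmax(logits, dim=1), smoothed, reduction='batchmean')
print(loss.item())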

Example #3
                spoof_data_list.append(temp_dic)

face_model = None
if run_parameters['model'] == 'vggface':
    weight_pth = os.path.join('models', args.exp_name,
                              before_model_name + '_face_model.pth')

    print(weight_pth)
    vgg_face = vgg_face_dag(weights_path=weight_pth,
                            return_layer=run_parameters['return_layer'])

    for p in vgg_face.parameters():
        p.requires_grad = False
    face_model = vgg_face
elif run_parameters['model'] == 'vgg16':
    vgg16 = torchvision.models.vgg16(pretrained=True)
    face_model = vgg16

face_model.eval()

if run_parameters['multi_gpu']:
    face_model = nn.DataParallel(face_model)

face_model.cuda()

inm = nn.InstanceNorm1d(1, affine=False)

print("No of training samples: {0}, {1}".format(len(live_data_list),
                                                len(spoof_data_list)))
# exit()
data_cell = {}
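
Both branches above follow the same freeze-then-eval pattern for using a pretrained network as a fixed feature extractor; vgg_face_dag and run_parameters are project-specific, so this minimal sketch uses plain torchvision VGG-16 instead:

import torch
import torchvision

# Minimal sketch of a frozen pretrained backbone as a feature extractor.
backbone = torchvision.models.vgg16(pretrained=True)
for p in backbone.parameters():
    p.requires_grad = False      # no gradients flow into the backbone
backbone.eval()                  # fix dropout/batch-norm behaviour

with torch.no_grad():
    feats = backbone.features(torch.randn(2, 3, 224, 224))
print(feats.shape)               # torch.Size([2, 512, 7, 7])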
Example #4
        models.train()
        ACC = 0
        batchHelper_train.resetIndex()
        while batchHelper_train._epochs_completed == 0:
            input_image, labelImage = batchHelper_train.next_batch(
                batch_size, True)
            # for i in range(int(totalSample/batch_size)):
            loss = 0
            optimizer.zero_grad()
            inputImageDataset = torch.from_numpy(input_image)
            inputImageDataset = inputImageDataset.to(device=device,
                                                     dtype=torch.float)
            # print(torch.sum(inputImageDataset))
            target_output = torch.from_numpy(labelImage.astype(int))
            target_output = target_output.to(device=device, dtype=torch.long)
            output_pred = models(inputImageDataset)

            loss = criterion(output_pred, target_output)
            loss.backward()
            optimizer.step()
            topv, topi = output_pred.topk(1)
            check_target = (topi.reshape(-1) == target_output)
            ACC += check_target.float().sum()
            if batchHelper_train._index_in_epoch % batch_size == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.9f} current ACC {}'.format(
                    y, batchHelper_train._index_in_epoch, totalSample_train,
                    (batchHelper_train._index_in_epoch * 100.) / totalSample_train,
                    loss.item(), (check_target.float().sum() / batch_size) * 100))

        ############# validation #####################
        batchHelper_val.resetIndex()
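
The accuracy bookkeeping in the loop above reduces to comparing the top-1 indices from topk against the targets; isolated as a minimal sketch (with made-up logits and targets):

import torch

# Standalone illustration of the top-1 accuracy bookkeeping used above.
output_pred = torch.randn(4, 10)          # (batch, classes) logits
target_output = torch.LongTensor([3, 1, 0, 9])

topv, topi = output_pred.topk(1)          # top-1 indices, shape (4, 1)
check_target = (topi.reshape(-1) == target_output)
acc = check_target.float().sum().item() / target_output.size(0)
print('batch accuracy: {:.1f}%'.format(acc * 100))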
Example #5
def test_visualisation(model_path, models, test_images_paths, test_dataset, mode, device):
	'''
	Test and save sample images with bounding boxes and the labels

	Inputs:
	- model_path : string (path to the saved model weights)
	- models : nn.Module (the detection model to load the weights into)
	- test_images_paths : list (directories containing the test images)
	- test_dataset : Dataset (testing dataset)
	- mode : string (subfolder tag for the saved results)
	- device : device (cuda)

	Outputs:
	- saved images with drawn bounding boxes
	'''
	import random

	label_dict = {1: 'Car', 2: 'Van', 3: 'Truck', 0: 'Background'}

	directory = os.path.join('results', 'sample_bbox')


	def save_image(image, directory, filename):
		if not os.path.exists(directory):
			os.makedirs(directory)

		img_file = os.path.join(directory, filename)

		image.save(img_file)

	print(f'Loading model weights from {model_path} to score the test images')  # Testing the model

	mean_test_scores = []


	model_file = torch.load(model_path)
	models.load_state_dict(model_file)

	print('Model loaded')


	models.to(device)
	models.eval()

	with torch.no_grad():

		for path in test_images_paths:
			with tqdm(total = len(test_dataset)) as bar:
				for image in test_dataset.images:
					img = os.path.join(path, image)
					l_img, pic_image = load_image(img)

					output = models(l_img)

					# im_show is prepared here but not used further in this snippet
					im_show = l_img.permute(2,0,3,1)
					im_show = im_show.squeeze(1)

					labels = output[0]['labels']
					scores = output[0]['scores']

					mean_score = scores.mean()


					if torch.isnan(mean_score).any():
						continue

					# print(f'This is the mean score : {mean_score}')

					rect = output[0]['boxes']

					if rect.nelement() != 0:
						i = 0
						int_rects = rect.int().cpu().numpy()
						labels = labels.int().cpu().numpy()
						scores = scores.float().cpu().numpy()

						for int_rect, label, score in zip(int_rects, labels, scores):
							# print(label_dict[label], score)
							if score >= 0.5:
								r = random.randint(20,255)
								g = random.randint(20,255)
								b = random.randint(20,255)
								rgb = (r,g,b)

								x0, y0, x1, y1 = int_rect
								img1 = ImageDraw.Draw(pic_image)
								font = ImageFont.truetype("bevan.ttf", 20)
								# img1.text([x0,y0,x1,y1+10], label, fill=(255,255,0))
								img1.text((0, i), f'{label_dict[label]} {score} ', rgb, font=font)
								img1.rectangle([x0, y0, x1, y1], outline=rgb, width=3)  # draw the box
								i += 20
							else:
								continue

						save_image(pic_image, os.path.join(directory, str(mode)), f'{image[:-4]}_samplebbox.png')

					mean_score = mean_score.float().cpu().numpy()

					mean_test_scores.append(mean_score)
					bar.update()

		print('FINISHED TESTING')
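
The inner loop above drops detections scoring under 0.5 one at a time; the same filtering can be done in one vectorized step on the detector's output dict (a sketch, assuming the torchvision-style output format used above):

import torch

# Sketch: vectorized score-threshold filtering of one detection result.
output = [{'boxes':  torch.tensor([[0., 0., 10., 10.], [5., 5., 8., 9.]]),
           'labels': torch.tensor([1, 3]),
           'scores': torch.tensor([0.9, 0.3])}]

keep = output[0]['scores'] >= 0.5         # boolean mask over detections
for key in ('boxes', 'labels', 'scores'):
    print(key, output[0][key][keep])      # only the confident detection survives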
Example #6
def train(models, writer, device):
    vgg19 = torchvision.models.vgg19(pretrained=True)
    for param in vgg19.parameters():
        param.requires_grad = False  # freeze the VGG-19 feature extractor
    vgg19.to(device)
    vgg19.eval()  # deterministic features (no dropout) for the content loss

    real_labels = torch.ones((config.batch_size, 1), device=device)
    fake_labels = torch.zeros((config.batch_size, 1), device=device)
    for idx in range(config.load_iter, config.train_iters):
        train_phone, train_dslr = load_train_data(
            config.dataset_dir, config.phone, config.batch_size,
            (config.channels, config.height, config.width))
        train_phone = torch.as_tensor(train_phone, device=device)
        train_dslr = torch.as_tensor(train_dslr, device=device)

        # --------------------------------------------------------------------------------------------------------------
        #                                              Train discriminators
        # --------------------------------------------------------------------------------------------------------------
        enhanced, phone_rec = models(train_phone)
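        # Note: enhanced.detach() below keeps discriminator gradients from
        # flowing back into the generators during this phase; the generators
        # are updated separately further down with the undetached tensors.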

        # 1) Adversarial color loss
        dslr_blur = gaussian_blur(train_dslr, config.kernel_size, config.sigma,
                                  config.channels, device)
        dslr_blur_logits = models.dis_c(dslr_blur)
        enhanced_blur = gaussian_blur(enhanced.detach(), config.kernel_size,
                                      config.sigma, config.channels, device)
        enhanced_blur_logits = models.dis_c(enhanced_blur)
        dis_loss_color = models.bce_criterion(dslr_blur_logits, real_labels) \
                         + models.bce_criterion(enhanced_blur_logits, fake_labels)

        # 2) Adversarial texture loss
        dslr_gray = rgb_to_gray(train_dslr, device)
        dslr_gray_logits = models.dis_t(dslr_gray)
        enhanced_gray = rgb_to_gray(enhanced.detach(), device)
        enhanced_gray_logits = models.dis_t(enhanced_gray)
        dis_loss_texture = models.bce_criterion(dslr_gray_logits, real_labels) \
                           + models.bce_criterion(enhanced_gray_logits, fake_labels)

        # Sum of losses
        dis_loss = dis_loss_color + dis_loss_texture

        models.dis_optimizer.zero_grad()
        dis_loss.backward()
        models.dis_optimizer.step()

        # --------------------------------------------------------------------------------------------------------------
        #                                                Train generators
        # --------------------------------------------------------------------------------------------------------------
        # 1) Content consistency loss
        phone_vgg = get_content(vgg19, train_phone, config.content_id, device)
        phone_rec_vgg = get_content(vgg19, phone_rec, config.content_id,
                                    device)
        gen_loss_content = models.mse_criterion(phone_vgg, phone_rec_vgg)

        # 2) Adversarial color loss
        enhanced_blur = gaussian_blur(enhanced, config.kernel_size,
                                      config.sigma, config.channels, device)
        enhanced_blur_logits = models.dis_c(enhanced_blur)
        gen_loss_color = models.bce_criterion(enhanced_blur_logits,
                                              real_labels)

        # 3) Adversarial texture loss
        enhanced_gray = rgb_to_gray(enhanced, device)
        enhanced_gray_logits = models.dis_t(enhanced_gray)
        gen_loss_texture = models.bce_criterion(enhanced_gray_logits,
                                                real_labels)

        # 4) TV loss
        y_tv = models.mse_criterion(enhanced[:, :, 1:, :],
                                    enhanced[:, :, :-1, :])
        x_tv = models.mse_criterion(enhanced[:, :, :, 1:],
                                    enhanced[:, :, :, :-1])
        gen_loss_tv = y_tv + x_tv

        # Sum of losses
        gen_loss = config.w_content * gen_loss_content + config.w_color * gen_loss_color \
                   + config.w_texture * gen_loss_texture + config.w_tv * gen_loss_tv

        models.gen_optimizer.zero_grad()
        gen_loss.backward()
        models.gen_optimizer.step()

        if (idx + 1) % config.print_step == 0:
            print(
                "Iteration: {}/{}, gen_loss: {:.4f}, dis_loss: {:.4f}".format(
                    idx + 1, config.train_iters, gen_loss.item(),
                    dis_loss.item()))
            print(
                "gen_loss_content: {:.4f}, gen_loss_color: {:.4f}, gen_loss_texture: {:.4f}, gen_loss_tv: {:.4f}"
                .format(gen_loss_content.item(), gen_loss_color.item(),
                        gen_loss_texture.item(), gen_loss_tv.item()))
            print("dis_loss_color: {:.4f}, dis_loss_texture: {:.4f}".format(
                dis_loss_color.item(), dis_loss_texture.item()))

        if (idx + 1) % config.tensorboard_step == 0:
            scalar_dict = {
                'gen_loss': gen_loss.item(),
                'dis_loss': dis_loss.item(),
                'gen_loss_content': gen_loss_content.item(),
                'gen_loss_color': gen_loss_color.item(),
                'gen_loss_texture': gen_loss_texture.item(),
                'gen_loss_tv': gen_loss_tv.item(),
                'dis_loss_color': dis_loss_color.item(),
                'dis_loss_texture': dis_loss_texture.item()
            }
            img_dict = {
                'phone': train_phone,
                'dslr': train_dslr,
                'enhanced': enhanced,
                'phone_rec': phone_rec,
                'dslr_blur': dslr_blur,
                'enhanced_blur': enhanced_blur,
                'dslr_gray': dslr_gray,
                'enhanced_gray': enhanced_gray
            }
            for tag, scalar_value in scalar_dict.items():
                writer.add_scalar(tag, scalar_value, idx + 1)
            for tag, img_tensor in img_dict.items():
                writer.add_images(tag, img_tensor, idx + 1)

        if (idx + 1) % config.checkpoint_step == 0:
            checkpoint_path = os.path.join(config.checkpoint_dir, config.phone)
            torch.save(
                models.gen_g.state_dict(),
                os.path.join(checkpoint_path,
                             '{:05d}-gen_g.ckpt'.format(idx + 1)))
            torch.save(
                models.gen_f.state_dict(),
                os.path.join(checkpoint_path,
                             '{:05d}-gen_f.ckpt'.format(idx + 1)))
            torch.save(
                models.dis_c.state_dict(),
                os.path.join(checkpoint_path,
                             '{:05d}-dis_c.ckpt'.format(idx + 1)))
            torch.save(
                models.dis_t.state_dict(),
                os.path.join(checkpoint_path,
                             '{:05d}-dis_t.ckpt'.format(idx + 1)))
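
The TV term in step 4 is an anisotropic total-variation loss written with the MSE criterion; as a standalone function, a minimal sketch:

import torch
import torch.nn.functional as F

def tv_loss(img):
    """Anisotropic total-variation loss for an NCHW batch, matching the
    shifted-MSE formulation used in the training loop above."""
    y_tv = F.mse_loss(img[:, :, 1:, :], img[:, :, :-1, :])  # vertical neighbours
    x_tv = F.mse_loss(img[:, :, :, 1:], img[:, :, :, :-1])  # horizontal neighbours
    return y_tv + x_tv

print(tv_loss(torch.rand(2, 3, 64, 64)).item())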