@LastEditTime: 2019-09-16 12:55:43
@Update:
'''
import os
import cv2
import torch
import pickle
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt

from module import CRN, _pca_

torch.set_default_tensor_type('torch.cuda.FloatTensor')

## Load models
pca = _pca_.PCA()
with open("models/final_model.pkl", 'rb') as f:
    model = pickle.load(f)
model = model.cuda()

datapath = "data/makeuporigin"
outputpath = "data/output"
makeuppath = "data/makeup"

with open("%s/multidetect.txt" % datapath, 'r') as f:
    detects = eval(f.read())

n = len(detects)
for i, (dirname, (_, [x1, y1, x2, y2], _)) in enumerate(detects.items()):
    print("[%d]/[%d]" % (i + 1, n))
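# NOTE (illustration, not from the dataset): the tuple unpacking above implies
# that multidetect.txt holds a dict mapping each image/directory name to a
# 3-tuple whose middle element is a face bounding box, roughly:
#
#     {"xxx.jpg": (score, [x1, y1, x2, y2], label), ...}
#
# The "score" and "label" field names are assumptions; only the
# [x1, y1, x2, y2] box is actually unpacked and used by this script.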
def train(param):
    # train_dataset = registry.create('Dataset', param["train_dataset"]["name"])(**param["train_dataset"]["kwargs"])
    # valid_dataset = registry.create('Dataset', param["valid_dataset"]["name"])(**param["valid_dataset"]["kwargs"])
    # train_data_loader = data.DataLoader(train_dataset, **param["loader"])
    pca = _pca_.PCA(**param["PCA"]["kwargs"])

    # model_names = ["model_CRN_6_110.pkl",
    #                "model_CRN_6_120.pkl",
    #                "model_CRN_6_130.pkl",
    #                "model_CRN_6_140.pkl",
    #                "model_CRN_6_150.pkl"]
    model_names = ["final_model.pkl"]

    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    selected_ids = [3, 903, 1015, 1012, 1008]

    for model_id, model_name in enumerate(model_names):
        with open("./models/" + model_name, "rb") as f:
            model = pickle.load(f)
        model = model.cuda()

        print("starting validating...")
        parts = model_name.split(".")
        # model_path = "./output_beautified/" + parts[0]
        model_path = "./output"
        if not os.path.exists(model_path):
            os.mkdir(model_path)

        if param["valid"]:
            with torch.no_grad():
                print(param["valid"])
                for i in selected_ids:
                    # Dump the PCA ground-truth components for inspection:
                    # path = param["valid"] + "/beautified ({}).jpg".format(i)
                    # inputs = img_to_tensor(path)
                    # path = "./data/EECS442_Makeup_Go/result_original/original ({}).jpg".format(i)
                    # targets = img_to_tensor(path)
                    # inputs.unsqueeze_(0)
                    # targets.unsqueeze_(0)
                    # ground_truth = pca.get_components(inputs, targets, True)[0]
                    # for j in range(8):
                    #     img = np.array(ground_truth[j]).transpose((1, 2, 0))
                    #     img[img > 255] = 255
                    #     img[img < 0] = 0
                    #     save_image(img, None, model_path + "/result_{}_{}.jpg".format(i, j))

                    # Run the model on one beautified image and reconstruct the output
                    path = param["valid"] + "/beautified ({}).jpg".format(i)
                    x = img_to_tensor(path).cuda()
                    sz = x.shape
                    x.unsqueeze_(0)
                    output = torch.squeeze(model(x), 0)
                    print("generating one image...")
                    img = pca.generate_img(output, x)
                    np_img = np.array(img[0])
                    np_img[np_img > 255] = 255
                    np_img[np_img < 0] = 0
                    np_img = np_img.transpose((1, 2, 0)).astype(np.uint8)
                    save_image(np_img, None, model_path + "/result_{}.jpg".format(i))
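# ---------------------------------------------------------------------------
# NOTE: `img_to_tensor` and `save_image` are called above (and in the training
# code below) but are not defined in this section; they are assumed to come
# from the project's utility module. The sketches below are only a guess at
# their behaviour, inferred from the call sites (float CHW tensors with values
# in [0, 255]; `save_image` taking an image, an optional colormap, and a file
# name). The two-argument `save_image(img, path)` call in the validation branch
# of the training code suggests the real signature differs, so treat these as
# placeholders, not the repository's implementation.

def img_to_tensor(path):
    """Load an image file as a float CHW tensor with values in [0, 255]."""
    img = np.array(Image.open(path).convert("RGB"), dtype=np.float32)  # HWC, 0-255
    return torch.from_numpy(img.transpose((2, 0, 1)))                  # CHW


def save_image(img, cm=None, fn=None):
    """Save an HWC uint8 array to `fn`, optionally through a matplotlib colormap."""
    plt.close()
    plt.imshow(img, cmap=cm)
    plt.axis("off")
    plt.savefig(fn, bbox_inches="tight", pad_inches=0)
# ---------------------------------------------------------------------------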
def train(param):
    # TODO
    # Use the PCA part to build the dataset:
    #   1. train data: the beautified images
    #   2. (based on the PCA results) the components of the difference between
    #      the beautified images and the ground truth
    # Check the size of the dataset images to decide whether batch operation is
    # feasible; the dataset returns a tensor of the eigenvalues.
    train_dataset = registry.create(
        'Dataset', param["train_dataset"]["name"])(**param["train_dataset"]["kwargs"])
    # valid_dataset = registry.create('Dataset', param["valid_dataset"]["name"])(**param["valid_dataset"]["kwargs"])
    train_data_loader = data.DataLoader(train_dataset, **param["loader"])
    # valid_data_loader = data.DataLoader(valid_dataset, **param["loader"])  # pin_memory=param.use_gpu

    # model = torch.nn.DataParallel(registry.create('Network', param.network.name)(**param.network.kwargs))
    model = CRN.CRN(**param["network"]["kwargs"])
    criterion = registry.create(
        'Loss', param["loss"]["name"])(**param["loss"]["kwargs"])
    optimizer = registry.create('Optimizer', param["optimizer"]["name"])(
        model.parameters(), **param["optimizer"]["kwargs"])
    lr_scheduler = registry.create('LRScheduler', param["lr_scheduler"]["name"])(
        optimizer, **param["lr_scheduler"]["kwargs"])
    pca = _pca_.PCA(**param["PCA"]["kwargs"])

    # TODO: checkpoint
    # if param.use_gpu:
    #     model = model.cuda()
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    model = model.cuda()

    load_previous = False
    if load_previous:
        pca = pickle.load(open("trained_models/model_PCA_9_0.pkl", "rb"))
        model = pickle.load(open("trained_models/model_CRN_9_0.pkl", "rb"))

    is_train = True
    g_counter = 0
    if is_train:
        save_dir = "./models"
        # idx = int(os.listdir(save_dir)[-1][5:]) + 1
        save_dir = "./models/model_PCA_modified"
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        save_dir += "/"
        if not os.path.exists("./output_PCA_modified"):
            os.mkdir("./output_PCA_modified")

        for epoch in range(int(param["epoch"])):
            print("epoch:", epoch)
            counter = 0
            for inputs, targets in train_data_loader:
                lr_scheduler.step()
                optimizer.zero_grad()

                # PCA components of the (beautified, original) pair are the regression target
                ground_truth = pca.get_components(inputs, targets, True)
                # Optionally dump the decomposed ground truth for inspection:
                # for current_data in ground_truth:
                #     for img in current_data:
                #         save_image(img / torch.max(img) * 255,
                #                    'decomposed_ground_truth/' + str(g_counter) + '.jpg')
                #         g_counter += 1

                output = model(inputs)
                loss = criterion(output, ground_truth)

                if counter % 10 == 0:
                    # if not torch.isnan(loss):
                    #     # FIXME: change the epoch + 10 back
                    #     with open(save_dir + 'model_PCA_{}_{}.pkl'.format(epoch + 10, counter), 'wb') as out:
                    #         pickle.dump(pca, out, pickle.HIGHEST_PROTOCOL)
                    with open(save_dir + 'model_CRN_{}_{}.pkl'.format(epoch, counter), 'wb') as out:
                        pickle.dump(model, out, pickle.HIGHEST_PROTOCOL)
                    print("loss {}:".format(counter), loss.item(),
                          "\tlr:", lr_scheduler.get_lr()[0])

                    print("starting validating...")
                    if param["valid"]:
                        with torch.no_grad():
                            x = img_to_tensor(param["valid"])
                            sz = x.shape
                            x.unsqueeze_(0)
                            output = torch.squeeze(model(x), 0)
                            print("generating one image...")
                            img = pca.generate_img(output, x)
                            np_img = np.array(img[0])
                            np_img[np_img > 255] = 255
                            np_img[np_img < 0] = 0
                            np_img = np_img.transpose((1, 2, 0)).astype(np.uint8)
                            plt.close()
                            plt.imshow(np_img)
                            plt.savefig("output_PCA_modified/result_{}_{}.jpg".format(epoch, counter))

                loss.backward()
                # perform gradient clipping
                clip_grad_norm_(model.parameters(), 2)
                optimizer.step()
                counter += 1

            # @TODO: evaluate on valid_data_loader after each epoch:
            #   model.eval()
            #   ground_truth = pca.get_components(inputs, targets, False)
            #   output = model(inputs)
            #   img = pca.generate_img(output, inputs)
            #   model.train()
            # @TODO: save a proper checkpoint instead of pickling the module:
            #   torch.save({'model': model.state_dict(),
            #               'optimizer': optimizer.state_dict(),
            #               'lr_scheduler': lr_scheduler.state_dict()}, ...)
    else:
        print("starting validating...")
        if param["valid"]:
            with torch.no_grad():
                image = Image.open(param["valid"])
                parts = param["valid"].split("/")
                parts = parts[-1].split(".")
                parts = parts[0].split("_")
                name = parts[0]
                x = TF.to_tensor(image).cuda()
                sz = x.shape
                x.unsqueeze_(0)
                output = model(x)
                print(output.shape)
                output = torch.squeeze(output, 0)
                print(output.shape)
                print("generating one image...")
                img = pca.generate_img(output, x).view(sz)
                print(img)
                save_image(img, "output/{}_result.jpg".format(name))
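# ---------------------------------------------------------------------------
# NOTE: a sketch of the `param` dict that train() expects, inferred only from
# the keys accessed above. Every name and value here is a placeholder; the real
# registry names, kwargs, and paths come from the project's config files.
#
# param = {
#     "train_dataset": {"name": "MakeupDataset", "kwargs": {"root": "./data"}},
#     "loader":        {"batch_size": 1, "shuffle": True},
#     "network":       {"kwargs": {}},                      # passed to CRN.CRN
#     "loss":          {"name": "MSELoss", "kwargs": {}},
#     "optimizer":     {"name": "Adam", "kwargs": {"lr": 1e-4}},
#     "lr_scheduler":  {"name": "StepLR", "kwargs": {"step_size": 10, "gamma": 0.5}},
#     "PCA":           {"kwargs": {}},
#     "epoch": 10,
#     # a single validation image here; the test-time train() earlier in this
#     # section expects a directory containing "beautified (<id>).jpg" files
#     "valid": "./data/valid/beautified (3).jpg",
# }
# train(param)
# ---------------------------------------------------------------------------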