def get(self, *args, **kwargs):
    """Composite a watermark (image or rendered text) onto an uploaded image.

    Query arguments:
        image: upload key of the base image (required).
        type:  'img' for an image watermark, 'txt' for a text watermark.
        pos:   position keyword understood by utils.get_mark_position.
        mark:  upload key of the watermark image (type == 'img' only).
        txt, font, fontSize, fontColor: text watermark settings
            (type == 'txt' only; fontSize defaults to 24).

    Renders a JSON object with code/msg, plus fp/fn on success.
    """
    image = self.get_argument('image', '')
    _type = self.get_argument('type', '')
    pos = self.get_argument('pos', '')
    if not image or not _type or not pos or _type not in ('txt', 'img'):
        self.render_obj(dict(code=400, msg='arguments error'))
        return
    img_image, fp_image, fn_image = utils.get_upload_image_file(
        self.upload_path, 'image', image)
    if not img_image:
        self.render_obj(dict(code=404, msg='image file not found'))
        return
    wm_path = os.path.join(self.media_path, 'watermark', _type)
    if _type == 'img':
        mark = self.get_argument('mark', '')
        if not mark:
            self.render_obj(dict(code=400, msg='args error: mark'))
            return
        img_mark, fp_mark, _ = utils.get_upload_image_file(
            self.upload_path, 'mark', mark)
        if not img_mark:
            self.render_obj(dict(code=404, msg='mark file not found'))
            return
        fp = '%s-%s' % (fp_image, fp_mark)
    else:  # _type == 'txt' — guaranteed by the validation above
        txt = self.get_argument('txt', '')
        font = self.get_argument('font', '')
        try:
            # Fixed: a non-numeric fontSize used to raise an uncaught
            # ValueError (HTTP 500); treat it as a bad argument instead.
            font_size = int(self.get_argument('fontSize', '24'))
        except ValueError:
            font_size = 0
        font_color = self.get_argument('fontColor', '')
        if not txt or not font or not font_size or not font_color:
            self.render_obj(
                dict(code=400, msg='args error: txt or font setting'))
            return
        img_mark = utils.text2img(txt,
                                  os.path.join(self.static_path, 'fonts'),
                                  font, font_color=font_color,
                                  font_size=font_size)
        fp = fp_image
    # Fixed: the 'img' branch previously ran get_mark_position + the pos
    # check itself and then the same call/check ran again below — the
    # position is now computed exactly once for both watermark types.
    position, img_re_mark = utils.get_mark_position(img_image, img_mark, pos)
    if not position:
        self.render_obj(dict(code=404, msg='args error: pos'))
        return
    fn = '%s-%s' % (pos, fn_image)
    new_img = utils.img_composite(img_image, img_re_mark, position)
    new_img_path = os.path.join(wm_path, fp)
    utils.img_save(new_img, new_img_path, fn)
    self.render_obj(dict(code=200, msg='success', fp=fp, fn=fn))
def generate_samples(self, c1, c2, args):
    """Save random samples of class c1 and interpolation samples between c1 and c2."""
    # Encode both class labels as one-hot vectors on the model's device.
    encoded = [to_onehot(torch.LongTensor([label]).to(self.device), self.device)
               for label in (c1, c2)]
    onehot1, onehot2 = encoded
    # Random samples drawn from the first class.
    img_save(self.generate_class_samples(onehot1, args.sample_num),
             args.random_samples_save_path)
    # Samples interpolated between the two classes.
    img_save(self.generate_interpolation_samples(onehot1, onehot2, args.sample_num),
             args.interpolation_samples_save_path)
def model_test(model, test_iter):
    """Run the model over test_iter, saving target and reconstruction images per batch."""
    model.eval()
    for idx, batch in enumerate(test_iter):
        # Flatten each image in the batch to a single feature vector.
        inputs = batch[0].to(model.device)
        inputs = inputs.view(inputs.shape[0], -1)
        target = inputs.clone()
        _, _, recon = model(inputs)
        img_save(target, f'target{idx}')
        img_save(recon, f'recon_result{idx}')
def get(self, *args, **kwargs):
    """Generate the full icon set (and a zip bundle) for the requested platform.

    Query arguments:
        os:        platform key into self.app_icon_set (required).
        icon:      upload key of the base icon image (required).
        subscript: optional upload key of a badge composited onto the icon.

    Renders a JSON object with code/msg, plus fp on success.
    """
    try:
        app_os = self.get_argument('os', '')
        icon = self.get_argument('icon', '')
        subscript = self.get_argument('subscript', '')
        icon_set = self.app_icon_set.get(app_os)
        if not icon_set or not icon:
            self.render_obj(dict(code=400, msg='arguments error'))
            return
        img_icon, fp_icon, _ = utils.get_upload_image_file(
            self.upload_path, 'icon', icon)
        img_sub, fp_sub = None, ''
        if not img_icon:
            self.render_obj(dict(code=404, msg='image file not found'))
            return
        if subscript:
            img_sub, fp_sub, _ = utils.get_upload_image_file(
                self.upload_path, 'subscript', subscript)
            if not img_sub:
                self.render_obj(dict(code=404, msg='image file not found'))
                return
        new_img = utils.img_composite(img_icon, img_sub) if img_sub else img_icon
        fp = '%s-%s' % (fp_icon, fp_sub) if fp_sub else fp_icon
        app_os_path = os.path.join(self.media_path, 'icons', app_os)
        new_img_path = os.path.join(app_os_path, fp)
        # Resize the composited icon to every size the platform requires.
        for d in icon_set:
            size = d.get('size', '')
            filename = d.get('filename', '')
            if not filename or not size:
                continue
            d_img = utils.img_resize(new_img, size)
            if not d_img:
                continue
            utils.img_save(d_img, new_img_path, filename)
        # Bundle every generated icon into a single zip file.
        zf_name = os.path.join(app_os_path, '%s.zip' % fp)
        # Fixed: open the archive via a context manager so the handle is
        # closed even if zf.write raises partway through the listing.
        with zipfile.ZipFile(zf_name, 'w', zipfile.ZIP_DEFLATED) as zf:
            for fn in os.listdir(new_img_path):
                zf.write(os.path.join(new_img_path, fn),
                         os.path.join(fp, fn))
        self.render_obj(dict(code=200, msg='success', fp=fp))
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception only, log, and respond 500.
        traceback.print_exc()
        self.render_obj(dict(code=500, msg='system error'))
def generate_samples(self, img1, img2, args):
    """Save random, similar, and interpolation sample grids for the two images."""
    n = args.sample_num
    # Add a batch dimension so the generator methods receive 4-D inputs.
    batch1 = img1.unsqueeze(0)
    batch2 = img2.unsqueeze(0)
    # Unconditional random draws.
    img_save(self.generate_random_samples(n), args.random_samples_save_path)
    # Samples similar to img1.
    img_save(self.generate_similar_samples(batch1, n),
             args.similar_samples_save_path)
    # Interpolation between img1 and img2.
    img_save(self.generate_interpolation_samples(batch1, batch2, n),
             args.interpolation_samples_save_path)
'''------------------------Data Load-------------------------'''
# Load reference / high-resolution image pairs plus two precomputed
# per-sample maps (M_t / M_s).  NOTE(review): assumed to be SRNTT-style
# texture-matching features — confirm against dataload / the .npy producer.
ref, hr = dataload.dataLoader("./SRNTT1000.h5")
M_t = np.load("autumn1000_M_t.npy")
M_s = np.load("autumn1000_M_s.npy")
# 80/20 train/test split, kept row-aligned across all four arrays.
train_hr, test_hr, train_ref, test_ref, train_Mt, test_Mt, train_Ms, test_Ms \
    = train_test_split(hr, ref, M_t, M_s, test_size=0.2)
# Build low-resolution counterparts; the `25` presumably means a 25% scale
# factor (160 -> 40) — TODO confirm against utils.img_resize.
train_lr = utils.img_resize(train_hr, 25)
train_lref = utils.img_resize(train_ref, 25)
test_lr = utils.img_resize(test_hr, 25)
test_lred = utils.img_resize(test_ref, 25)
# Dump the test-set reference / LR / HR images for later inspection.
for i in range(test_ref.shape[0]):
    path = './result/ref/'+ str(i+1) + '.bmp'
    utils.img_save(test_ref[i,:,:,:], path)
    path = './result/lr/' + str(i+1) + '.bmp'
    utils.img_save(test_lr[i,:,:,:], path)
    path = './result/hr/' + str(i+1) + '.bmp'
    utils.img_save(test_hr[i,:,:,:], path)
'''----------------------Net Construct-------------------------'''
# Graph inputs: 40x40x3 LR input, 160x160x3 HR target, two 40x40x256
# auxiliary feature maps, a train/eval flag, and a scalar learning rate.
x = tf.placeholder(tf.float32, [None, 40, 40, 3])
y = tf.placeholder(tf.float32, [None, 160, 160, 3])
Mt_ph= tf.placeholder(tf.float32, [None, 40, 40, 256])
Ms_ph= tf.placeholder(tf.float32, [None, 40, 40, 256])
train_mode = tf.placeholder(tf.bool)
Learning_rate = tf.placeholder(tf.float32, shape=[])
ce_net = ce_model.CE(x, train_mode)
# Build the training set from the first 65 '91-image' pictures ('t*.bmp').
k = 0
for i in range(65):
    path = f'../dataset/91-image/t{i + 1}.bmp'
    img = utils.img_crop(utils.img_read(path), 160, 160)
    # Skip pictures that do not crop to a full 160x160 RGB patch.
    if img.shape != (160, 160, 3):
        continue
    train_hr[k, :, :, :] = img
    img = utils.img_downsize(img, 25)
    train_lr[k, :, :, :] = img
    k += 1
# Build the test set from the 26 'tt*.bmp' pictures the same way.
k = 0
for i in range(26):
    path = f'../dataset/91-image/tt{i + 1}.bmp'
    img = utils.img_crop(utils.img_read(path), 160, 160)
    if img.shape != (160, 160, 3):
        continue
    test_hr[k, :, :, :] = img
    img = utils.img_downsize(img, 25)
    test_lr[k, :, :, :] = img
    k += 1
# Run the model and write out the first ten predicted images.
prob = test_ce(train_lr, train_hr, test_lr, test_hr)
for i in range(10):
    path = f'../dataset/91-image/t{i + 1}_.bmp'
    utils.img_save(prob[i, :, :, :], path)