def __init__(self, port1, port2):
    """
    Open the UDP receive/send socket pair on localhost.
    :param port1: local port the receive socket binds to
    :param port2: peer port the send socket targets
    :raises: re-raises any socket error after cleaning up
    """
    atexit.register(self.close)
    self._port1 = port1
    self._port2 = port2
    self._buffer_size = 1024
    self._open_socket = False
    self._open_send = False
    self._loaded = False
    self._bind = ("localhost", port1)
    # NOTE(review): startup banner is logged at error level, presumably so it
    # always shows regardless of log config — confirm before changing
    log.error("socket start, rcv port:" + str(port1) + " send port:" + str(port2))
    try:
        self._rcv_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._rcv_socket.bind(self._bind)
        self._open_socket = True
        # bug fix: use the configured buffer size instead of a duplicated literal
        data = self._rcv_socket.recvfrom(self._buffer_size)
        log.info("receive data")
        log.info(data[0].decode('utf-8'))
    except Exception as e:
        self._open_socket = False
        self.close()
        # bug fix: Exception has no .message attribute in Python 3; log the
        # exception itself instead of wrapping it in a throwaway socket.error
        log.error("socket error %s", e)
        raise
    try:
        self._snd_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._bind2 = ("localhost", port2)
        self._open_send = True
    except Exception:
        # bug fix: _snd_socket does not exist if socket creation itself failed
        if hasattr(self, "_snd_socket"):
            self._snd_socket.close()
        self._open_send = False
        raise
def recv(self):
    """
    Block until one UDP datagram arrives on the receive socket and print its
    UTF-8 decoded payload.
    :raises: re-raises any socket/decoding error after logging it
    """
    try:
        data = self._rcv_socket.recvfrom(self._buffer_size)
        print("receive data")
        print(data[0].decode('utf-8'))
    except Exception as e:
        # bug fix: Exception has no .message attribute in Python 3
        log.error("%s", e)
        raise
def inference(self, args, to_save_dir, img_path):
    """
    Restore a checkpoint and run parameter extraction on one image.
    :param args: parsed CLI options (kept for interface parity)
    :param to_save_dir: checkpoint directory handed to loadckpt
    :param img_path: path of the input image, read as RGB
    """
    loaded = self.loadckpt(to_save_dir)
    if not loaded:
        log.error("error, loaded failed")
        return
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    # consider imageio/PIL when the pinned SciPy version is upgraded
    img = scipy.misc.imread(img_path, mode='RGB')
    img = scipy.misc.imresize(img, size=512)
    param = self.sess.run(self.extractor(img))
    # bug fix: the stray positional arg had no %s placeholder, so the logger
    # never rendered the params value
    log.info("params: %s", param)
def __init__(self, path_to_dataset):
    """Collect the path of every file found directly under *path_to_dataset*."""
    self.dataset = []
    print(path_to_dataset)
    if not os.path.exists(path_to_dataset):
        log.error("can't be found path %s. Skip it." % path_to_dataset)
    else:
        # progress bar over the directory listing; one entry per file
        for entry in tqdm(os.listdir(path_to_dataset)):
            self.dataset.append(os.path.join(path_to_dataset, entry))
    log.info("Finished. Constructed Places2 dataset of %d images." % len(self.dataset))
def only_send(self, msg):
    """
    Send a message without waiting for a reply.
    :param msg: text payload, UTF-8 encoded before sending
    :raises: re-raises any socket error after logging it
    """
    try:
        self._snd_socket.sendto(msg.encode('utf-8'), self._bind2)
        print("send success")
    except Exception as e:
        # bug fix: Exception has no .message attribute in Python 3
        log.error("%s", e)
        raise
def rm_dir(path):
    """
    Remove a directory tree, including its sub-directories.
    :param path: directory path
    """
    try:
        if os.path.exists(path):
            log.warn("rm directory %s", path)
            shutil.rmtree(path)
        else:
            log.warn("not exist directory: %s", path)
    except IOError as e:
        # bug fix: the exception was passed as a stray positional arg (never
        # formatted) and the message was copy-pasted from the imitator loader
        log.error("io error, rm_dir failed {0}".format(e))
def send_recv(self, msg):
    """
    Send a message and then wait for the reply, unless it is the quit command.
    :param msg: text command; the wire payload is prefixed with "rcv"
    :raises: re-raises any socket error after logging it
    """
    try:
        payload = "rcv" + msg
        self._snd_socket.sendto(payload.encode('utf-8'), self._bind2)
        # bug fix: compare the original command — the prefixed payload could
        # never equal "quit", so the guard was always true
        if msg != "quit":
            self.recv()
    except Exception as e:
        # bug fix: Exception has no .message attribute in Python 3
        log.error("%s", e)
        raise
def _send(self, cmd, message): """ private method to send message :param message: message body """ if self.open: try: message = cmd + message self.socket.sendto(message.encode('utf-8'), self.bind) except Exception as e: log.error(e) raise else: log.warn("connect closed")
def generate_file(path, content):
    """
    Write *content* to *path*, creating the parent directory if needed.
    :param path: file path
    :param content: file content (bytes — the file is opened in binary mode)
    :return: None
    """
    try:
        # bug fix: os.path.pardir is the constant string ".." and is not
        # callable; dirname() is the parent-directory lookup that was intended
        parent = os.path.dirname(path)
        # bug fix: test the directory (not the target file) before creating it;
        # makedirs also covers multi-level parents
        if parent and not os.path.exists(parent):
            os.makedirs(parent)
        # context manager guarantees the handle closes even if write fails
        with open(path, 'wb') as f:
            f.write(content)
    except IOError as e:
        log.error("io error, load imitator failed {0}".format(e))
def clear_files(dir):
    """
    Remove every regular file below *dir* while keeping the directory tree.
    A missing *dir* is created instead.
    NOTE(review): os.walk recurses, so files inside sub-directories are
    removed too — confirm this matches the caller's expectation.
    :param dir: directory path
    """
    try:
        if not os.path.exists(dir):
            log.warn("not exist directory: %s, create new", dir)
            os.mkdir(dir)
            return
        # bottom-up traversal; only file entries are removed
        for root, dirs, files in os.walk(dir, topdown=False):
            for file_name in files:
                os.remove(os.path.join(root, file_name))
    except IOError as e:
        log.error("io error, load imitator failed {0}".format(e))
def clear_folder(dir):
    """
    Empty *dir* completely: remove all files and sub-directories, keeping
    *dir* itself.
    :param dir: directory path
    """
    try:
        if os.path.exists(dir):
            # bottom-up walk so children are removed before their parents
            for root, dirs, files in os.walk(dir, topdown=False):
                for name in files:
                    os.remove(os.path.join(root, name))
                for name in dirs:
                    os.rmdir(os.path.join(root, name))
        else:
            log.warn("not exist directory: %s", dir)
    except IOError as e:
        # bug fix: the exception was passed as a stray positional arg and was
        # never formatted into the message
        log.error("io error, load imitator failed {0}".format(e))
def face_features(path_img, path_save=None):
    """
    Extract the facial-feature image from a photo.
    :param path_img: input photo path, str
    :param path_save: output save image path, str
    :return: aligned face image, or None when processing raised an error
    """
    try:
        img = cv2.imread(path_img)
        # halve large photos before alignment
        pixel_count = img.shape[0] * img.shape[1]
        if pixel_count > 512 * 512:
            img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
        scaled = align_face(img)
        if path_save is not None:
            # save both the (possibly resized) input and the aligned result
            cv2.imwrite(path_save, img)
            cv2.imwrite(path_save.replace("align_", "align2_"), scaled)
        return scaled
    except Exception as e:
        log.error(e)
def get_imit_cp(dir, ext=None):
    """
    Find the most recently modified checkpoint file in a directory.
    :param dir: search directory
    :param ext: optional file-name suffix filter
    :return: path of the newest matching file, or None when nothing matches
             or an IO error occurred
    """
    try:
        latest_mtime = 0
        result = None
        # renamed locals: `file` shadowed the builtin and `time` the module name
        for entry in os.listdir(dir):
            path = os.path.join(dir, entry)
            mtime = os.path.getmtime(path)
            if ext is not None and not entry.endswith(ext):
                continue
            if latest_mtime == 0 or mtime > latest_mtime:
                latest_mtime = mtime
                result = path
        return result
    except IOError as e:
        # bug fix: include the underlying error instead of a bare static message
        log.error('get_imit_cp, io error {0}'.format(e))
def init_loadckpt(self, to_save_dir=None, ckpt_nmbr=None):
    """
    Initialise all TF variables, restore a checkpoint and make sure the
    inference output folder exists.
    :param to_save_dir: output folder; a default path is derived when None
    :param ckpt_nmbr: checkpoint number to restore, latest when None
    :return: the folder where inference results will be stored
    """
    self.sess.run(tf.global_variables_initializer())
    log.info("Start inference.")
    # try the regular checkpoint dir first, then fall back to the long-term one
    if self.load(self.checkpoint_dir, ckpt_nmbr):
        log.info(" [*] Load SUCCESS")
    elif self.load(self.checkpoint_long_dir, ckpt_nmbr):
        log.info(" [*] Load SUCCESS")
    else:
        log.error(" [!] Load failed...")
    # Create folder to store results.
    if to_save_dir is None:
        folder = 'inference_ckpt%d_sz%d' % (self.initial_step, self.image_size)
        to_save_dir = os.path.join(self.root_dir, self.model_name, folder)
    if not os.path.exists(to_save_dir):
        os.makedirs(to_save_dir)
    return to_save_dir
def main(_):
    """CLI entry point: parse arguments and dispatch on ``args.phase``.

    Phases seen here: train / inference / lightcnn / faceparsing / net.
    The "net" phase runs an interactive UDP console (uses ``raw_input``,
    i.e. Python 2 style input).
    """
    args = parser.parse_args()
    log.init("FaceNeural", logging.DEBUG, log_path="output/log.txt")
    with tf.Session() as sess:
        if args.phase == "train":
            model = Face(sess, args)
            model.train(args)
            log.info('train mode')
        elif args.phase == "inference":
            log.info("inference")
            model = Face(sess, args)
            model.inference(args)
        elif args.phase == "lightcnn":
            # placeholder branch: only logs, nothing implemented here
            log.info("light cnn test")
        elif args.phase == "faceparsing":
            # placeholder branch: only logs, nothing implemented here
            log.info("faceparsing")
        elif args.phase == "net":
            log.info("net start with ports (%d, %d)", 5010, 5011)
            net = Net(5010, 5011)
            # interactive loop: s = send only, r = send and receive, q = quit
            while True:
                r_input = raw_input("command: \n")
                if r_input == "s":
                    msg = raw_input("input: ")
                    net.only_send(msg)
                elif r_input == 'r':
                    msg = raw_input("input: ")
                    net.send_recv(msg)
                elif r_input == "q":
                    # tell the peer to shut down, then close our sockets
                    net.only_send("quit")
                    net.close()
                    break
                else:
                    log.error("unknown code, quit")
                    net.close()
                    break
# NOTE(review): this fragment continues a per-file loop and an args.phase
# dispatch chain whose beginning (and enclosing def) is outside this view —
# indentation below is reconstructed; TODO confirm against the full file.
            p2 = os.path.join(path, "a-" + file)
            al = align.face_features(p, p2)
            # face parsing on the aligned crop
            ev = utils.parse_evaluate(al, args.parsing_checkpoint, cuda=cuda)
            p = os.path.join(path, "b-" + file)
            cv2.imwrite(p, ev)
            # invert the edge map (dark edges on a white background)
            ev = 255 - utils.img_edge(ev)
            p = os.path.join(path, "c-" + file)
            cv2.imwrite(p, ev)
        elif args.phase == "dataset":
            dataset = FaceDataset(args, "test")
            dataset.pre_process(cuda)
        elif args.phase == "preview":
            log.info("preview picture")
            path = "../export/regular/model.jpg"
            img = cv2.imread(path)
            img2 = utils.parse_evaluate(img, args.parsing_checkpoint, cuda)
            img3 = utils.img_edge(img2)
            img3_ = ops.fill_grey(img3)
            img4 = align.face_features(path)
            log.info("{0} {1} {2} {3}".format(img.shape, img2.shape, img3_.shape, img4.shape))
            ops.merge_4image(img, img2, img3_, img4, show=True)
        elif args.phase == "evaluate":
            log.info("evaluation mode start")
            evl = Evaluate(args, cuda=cuda)
            img = cv2.imread(args.eval_image).astype(np.float32)
            x_ = evl.itr_train(img)
            evl.output(x_, img)
        else:
            log.error("not known phase %s", args.phase)
def __init__(self, message):
    """Store *message* on the instance and emit it to the error log."""
    self.message = message
    log.error("io error: " + message)
def train(self, args, ckpt_nmbr=None):
    """Run the adversarial training loop.

    Spawns producer processes that fill two batch queues (content photos and
    art images), restores the latest checkpoint, then alternates generator /
    discriminator updates based on a smoothed discriminator success rate.

    :param args: parsed options; reads ``discr_success_rate``
    :param ckpt_nmbr: optional checkpoint number to restore; latest when None
    """
    # Initialize augmentor.
    augmentor = img_augm.Augmentor(
        crop_size=[self.options.image_size, self.options.image_size],
        vertical_flip_prb=0.,
        hsv_augm_prb=1.0,
        hue_augm_shift=0.05,
        saturation_augm_shift=0.05,
        saturation_augm_scale=0.05,
        value_augm_shift=0.05,
        value_augm_scale=0.05,
    )
    content_dataset_coco = prepare_dataset.CocoDataset(
        path_to_dataset=self.options.path_to_content_dataset)
    art_dataset = prepare_dataset.ArtDataset(
        path_to_art_dataset=self.options.path_to_art_dataset)

    # Initialize queue workers for both datasets.
    q_art = multiprocessing.Queue(maxsize=10)
    q_content = multiprocessing.Queue(maxsize=10)
    jobs = []
    # five producer processes per dataset keep both queues filled
    for i in range(5):
        p = multiprocessing.Process(
            target=content_dataset_coco.initialize_batch_worker,
            args=(q_content, augmentor, self.batch_size, i))
        p.start()
        jobs.append(p)
        p = multiprocessing.Process(
            target=art_dataset.initialize_batch_worker,
            args=(q_art, augmentor, self.batch_size, i))
        p.start()
        jobs.append(p)
    log.info("Processes are started.")
    # give the workers a head start before consuming batches
    time.sleep(3)

    # Now initialize the graph
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
    log.info("Start training.")
    if self.load(self.checkpoint_dir, ckpt_nmbr):
        log.info(" [*] Load SUCCESS")
    else:
        # fall back to the long-term checkpoint directory
        if self.load(self.checkpoint_long_dir, ckpt_nmbr):
            log.info(" [*] Load SUCCESS")
        else:
            log.error(" [!] Load failed...")

    # Initial discriminator success rate.
    win_rate = args.discr_success_rate
    discr_success = args.discr_success_rate
    # EMA smoothing factor for the discriminator success estimate
    alpha = 0.05

    for step in tqdm(range(self.initial_step, self.options.total_steps + 1),
                     initial=self.initial_step,
                     total=self.options.total_steps):
        # Get batch from the queue with batches q, if the last is non-empty.
        # NOTE(review): busy-wait until both queues hold a batch
        while q_art.empty() or q_content.empty():
            pass
        batch_art = q_art.get()
        batch_content = q_content.get()

        if discr_success >= win_rate:
            # Train generator
            _, summary_all, gener_acc_ = self.sess.run(
                [
                    self.g_optim_step,
                    self.summary_merged_all,
                    self.gener_acc
                ],
                feed_dict={
                    self.input_painting: normalize_arr_of_imgs(batch_art['image']),
                    self.input_photo: normalize_arr_of_imgs(batch_content['image']),
                    self.lr: self.options.lr
                })
            # success drops as the generator gets better at fooling
            discr_success = discr_success * (1. - alpha) + alpha * (1. - gener_acc_)
        else:
            # Train discriminator.
            _, summary_all, discr_acc_ = self.sess.run(
                [
                    self.d_optim_step,
                    self.summary_merged_all,
                    self.discr_acc
                ],
                feed_dict={
                    self.input_painting: normalize_arr_of_imgs(batch_art['image']),
                    self.input_photo: normalize_arr_of_imgs(batch_content['image']),
                    self.lr: self.options.lr
                })
            discr_success = discr_success * (1. - alpha) + alpha * discr_acc_

        self.writer.add_summary(summary_all, step * self.batch_size)

        if step % self.options.save_freq == 0 and step > self.initial_step:
            self.save(step)
        # And additionally save all checkpoints each 15000 steps.
        if step % 15000 == 0 and step > self.initial_step:
            self.save(step, is_long=True)

        if step % 500 == 0:
            # periodic qualitative sample dump
            output_paintings_, output_photos_ = self.sess.run(
                [self.input_painting, self.output_photo],
                feed_dict={
                    self.input_painting: normalize_arr_of_imgs(batch_art['image']),
                    self.input_photo: normalize_arr_of_imgs(batch_content['image']),
                    self.lr: self.options.lr
                })
            save_batch(
                input_painting_batch=batch_art['image'],
                input_photo_batch=batch_content['image'],
                output_painting_batch=denormalize_arr_of_imgs(output_paintings_),
                output_photo_batch=denormalize_arr_of_imgs(output_photos_),
                filepath='%s/step_%d.jpg' % (self.sample_dir, step))

    log.info("Training is finished. Terminate jobs.")
    for p in jobs:
        p.join()
        p.terminate()
    log.info("Done.")
# NOTE(review): the four indented statements below are the tail of a close()
# method whose `def` line is outside this view — presumably it notifies the
# peer with a 'q' command, closes the UDP socket and clears the open flag.
# Indentation is reconstructed; TODO confirm against the full file.
        log.warn("socket close")
        self._send('q', "-")  # quit
        self.socket.close()
        self.open = False


if __name__ == '__main__':
    # Manual smoke-test driver for the Net UDP client.
    from parse import parser
    import logging
    args = parser.parse_args()
    log.init("FaceNeural", logging.INFO, log_path="./output/log.txt")
    log.info(utils.curr_roleshape(args.path_to_dataset))
    net = Net(args.udp_port, args)
    # interactive loop: m = send test message, p = send random params, q = quit
    while True:
        r_input = input("command: ")
        if r_input == "m":
            net.send_message("hello world")
        elif r_input == "p":
            # random 4-digit tag attached to the parameter packet
            params = utils.random_params(args.params_cnt)
            net.send_param(params, str(random.randint(1000, 9999)))
        elif r_input == "q":
            net.close()
            break
        else:
            log.error("unknown code, quit")
            net.close()
            break