def itr_train(self, y):
    """
    iterator train
    :param y: numpy array, image [H, W, C]
    """
    param_cnt = self.args.params_cnt
    t_params = 0.5 * torch.ones((1, param_cnt), dtype=torch.float32)
    if self.cuda:
        t_params = t_params.cuda()
    t_params.requires_grad = True
    self.losses.clear()
    lr = self.learning_rate
    progress = tqdm(range(self.max_itr), initial=0, total=self.max_itr)
    for i in progress:
        y_ = self.imitator.forward(t_params)
        loss, info = self.evaluate_ls(y, y_, i)
        loss.backward()
        # manual projected gradient step: update, clamp to [0, 1], reset grads
        t_params.data = t_params.data - lr * t_params.grad.data
        t_params.data = t_params.data.clamp(0., 1.)
        t_params.grad.zero_()
        progress.set_description(info)
        if i % 100 == 0:  # was `self.max_itr % 100`, which never changes; decay lr every 100 steps
            x = i / float(self.max_itr)
            lr = self.learning_rate * (x ** 2 - 2 * x + 1) + 1e-4  # quadratic decay, (1 - x)^2
    self.plot()
    log.info("steps:{0} params:{1}".format(self.max_itr, t_params.data))
    return t_params
def __init__(self, port1, port2):
    atexit.register(self.close)
    self._port1 = port1
    self._port2 = port2
    self._buffer_size = 1024
    self._open_socket = False
    self._open_send = False
    self._loaded = False
    self._bind = ("localhost", port1)
    log.info("socket start, rcv port:" + str(port1) + " send port:" + str(port2))
    try:
        self._rcv_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._rcv_socket.bind(self._bind)
        self._open_socket = True
        data = self._rcv_socket.recvfrom(1024)
        log.info("receive data")
        log.info(data[0].decode('utf-8'))
    except Exception as e:
        self._open_socket = False
        self.close()
        log.error("socket error: %s", str(e))  # Exception.message is not available in Python 3
        raise
    try:
        self._snd_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._bind2 = ("localhost", port2)
        self._open_send = True
    except Exception as e:
        self._snd_socket.close()
        self._open_send = False
        raise
def itr_train(self, y):
    """
    iterator train
    :param y: numpy array, image [H, W, C]
    """
    param_cnt = self.args.params_cnt
    t_params = 0.5 * torch.ones((1, param_cnt), dtype=torch.float32)
    if self.cuda:
        t_params = t_params.cuda()
    t_params.requires_grad = True
    self.losses.clear()
    lr = self.learning_rate
    self._init_l1_l2(y)
    m_progress = tqdm(range(1, self.max_itr + 1))
    for i in m_progress:
        y_ = self.imitator(t_params)
        loss, info = self.evaluate_ls(y_)
        loss.backward()
        if i == 1:
            self.output(t_params, y, 0)
        t_params.data = t_params.data - lr * t_params.grad.data
        t_params.data = t_params.data.clamp(0., 1.)
        t_params.grad.zero_()
        m_progress.set_description(info)
        if i % self.args.eval_prev_freq == 0:
            x = i / float(self.max_itr)
            lr = self.learning_rate * (1 - x) + 1e-2
            self.output(t_params, y, i)
            self.plot()
    self.plot()
    log.info("steps:{0} params:{1}".format(self.max_itr, t_params.data))
    return t_params
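# For comparison only: a minimal sketch (not project code) of the same projected-gradient
# update written with torch.optim.SGD. The stub `imitator`, `evaluate_ls`, and the numeric
# values below are placeholder assumptions standing in for the loop above.
import torch

param_cnt, learning_rate, max_itr = 95, 0.1, 100   # illustrative values
imitator = torch.nn.Linear(param_cnt, 64 * 64)     # stand-in for the real imitator network

def evaluate_ls(y_):
    # stand-in loss; the project combines L1/L2 terms here
    return y_.pow(2).mean(), "l2"

t_params = torch.full((1, param_cnt), 0.5, requires_grad=True)
optimizer = torch.optim.SGD([t_params], lr=learning_rate)
for i in range(1, max_itr + 1):
    optimizer.zero_grad()
    loss, info = evaluate_ls(imitator(t_params))
    loss.backward()
    optimizer.step()
    with torch.no_grad():
        t_params.clamp_(0., 1.)  # project back onto the valid parameter range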
def inference(self, args, path_to_folder, to_save_dir=None,
              resize_to_original=True, ckpt_nmbr=None):
    to_save_dir = self.init_loadckpt(to_save_dir, ckpt_nmbr)
    names = []
    for d in path_to_folder:
        names += glob(os.path.join(d, '*'))
    names = sorted([x for x in names if os.path.basename(x)[0] != '.'])
    for img_idx, img_path in enumerate(tqdm(names)):
        img = scipy.misc.imread(img_path, mode='RGB')
        img_shape = img.shape[:2]
        # Resize the smallest side of the image to self.image_size.
        alpha = float(self.image_size) / float(min(img_shape))
        img = scipy.misc.imresize(img, size=alpha)
        img = np.expand_dims(img, axis=0)
        d_list = self.sess.run(
            self.output_photo,
            feed_dict={self.input_photo: normalize_arr_of_imgs(img)})
        img = d_list[0]
        img = denormalize_arr_of_imgs(img)
        if resize_to_original:
            img = scipy.misc.imresize(img, size=img_shape)
        img_name = os.path.basename(img_path)
        scipy.misc.imsave(
            os.path.join(to_save_dir, img_name[:-4] + "_stylized.jpg"), img)
    log.info("Inference is finished.")
def __init__(self, args, mode="train"):
    """
    Dataset construction
    :param args: argparse options
    :param mode: "train": training set, "test": test set
    """
    self.names = []
    self.params = []
    if mode == "train":
        self.path = args.path_to_dataset
    elif mode == "test":
        self.path = args.path_to_testset
    else:
        raise NeuralException("no such mode for dataset")
    self.args = args
    if os.path.exists(self.path):
        name = "db_description"
        path = os.path.join(self.path, name)
        log.info(path)
        f = open(path, "rb")
        self.cnt = struct.unpack("i", f.read(4))[0]
        for it in range(self.cnt):
            kk = f.read(10)[1:]  # the first byte is the C# string's length prefix
            self.names.append(str(kk, encoding='utf-8'))
            v = []
            for i in range(args.params_cnt):
                v.append(struct.unpack("f", f.read(4))[0])
            self.params.append(v)
        f.close()
    else:
        log.info("path %s not found. Skip it.", self.path)
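# A minimal writer sketch for the db_description layout read above, handy for building a tiny
# test file: an int32 record count, then per record a 1-byte C# string-length prefix, 9 name
# bytes, and params_cnt float32 values. The helper name and the space padding are assumptions;
# only the byte layout is taken from the reader.
import struct

def write_db_description(path, names, params):
    with open(path, "wb") as f:
        f.write(struct.pack("i", len(names)))
        for name, values in zip(names, params):
            name_bytes = name.encode("utf-8")[:9].ljust(9, b" ")
            f.write(struct.pack("B", len(name_bytes)))
            f.write(name_bytes)
            for v in values:
                f.write(struct.pack("f", v))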
def inference(self, args, to_save_dir, img_path):
    loaded = self.loadckpt(to_save_dir)
    if loaded:
        img = scipy.misc.imread(img_path, mode='RGB')
        img = scipy.misc.imresize(img, size=512)
        param = self.sess.run(self.extractor(img))
        log.info("params: %s", param)
    else:
        log.error("error: load failed")
def __init__(self, path_to_dataset):
    self.dataset = []
    print(path_to_dataset)
    if os.path.exists(path_to_dataset):
        for file_name in tqdm(os.listdir(path_to_dataset)):
            self.dataset.append(os.path.join(path_to_dataset, file_name))
    else:
        log.error("path %s not found. Skip it." % path_to_dataset)
    log.info("Finished. Constructed Places2 dataset of %d images." % len(self.dataset))
def content_loss(img1, img2):
    """
    change resolution to 1/8, 512/8 = 64
    :param img1: numpy array 64x64
    :param img2: numpy array
    :return: tensor
    """
    image1 = torch.from_numpy(img1)
    image2 = torch.from_numpy(img2)
    image2 = image2.view(64, 64, 1)
    log.info("img1 size {0} img2 size: {1}".format(image1.size(), image2.size()))
    return F.mse_loss(image1, image2)
def load(self, checkpoint_dir, ckpt_nmbr=None):
    if ckpt_nmbr:
        if len([x for x in os.listdir(checkpoint_dir) if str(ckpt_nmbr) in x]) > 0:
            log.info(" [*] Reading checkpoint %d from folder %s." % (ckpt_nmbr, checkpoint_dir))
            ckpt_name = [x for x in os.listdir(checkpoint_dir) if str(ckpt_nmbr) in x][0]
            ckpt_name = '.'.join(ckpt_name.split('.')[:-1])
            self.initial_step = ckpt_nmbr
            log.info("Load checkpoint %s. Initial step: %s." % (ckpt_name, self.initial_step))
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            return True
        else:
            return False
    else:
        print(" [*] Reading latest checkpoint from folder %s." % checkpoint_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            log.info("checkpoint path: %s", ckpt.model_checkpoint_path)
            self.initial_step = int(ckpt_name.split("_")[-1].split(".")[0])
            log.info("Load checkpoint %s. Initial step: %s." % (ckpt_name, self.initial_step))
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            return True
        else:
            return False
def move_file(srcfile, dstfile):
    """
    move file from source to destination
    :param srcfile: source path
    :param dstfile: destination path
    """
    if not os.path.isfile(srcfile):
        log.info("%s does not exist!" % srcfile)
    else:
        file_root, file_name = os.path.split(dstfile)  # split path and file name
        if not os.path.exists(file_root):
            os.makedirs(file_root)  # create the destination directory
        shutil.move(srcfile, dstfile)  # move the file
        log.info("move %s -> %s" % (srcfile, dstfile))
def evaluate(self):
    """
    evaluate accuracy rate
    :return: accuracy rate
    """
    self.model.eval()
    dataset = FaceDataset(self.args, mode="test")
    steps = 100
    accuracy = 0.0
    for step in range(steps):
        log.info("step: %d", step)
        names, params, images = dataset.get_batch(batch_size=self.args.batch_size, edge=False)
        loss, _ = self.itr_train(images)
        accuracy += 1.0 - loss
    accuracy = accuracy / steps
    log.info("accuracy rate is %f", accuracy)
    return accuracy
def init_device(arguments):
    """
    check whether the configuration and hardware support the GPU
    :param arguments: argparse options
    :return: True if the GPU is used, plus the selected device
    """
    support_gpu = torch.cuda.is_available()
    log.info("neural face network use gpu: %s", support_gpu and arguments.use_gpu)
    if support_gpu and arguments.use_gpu:
        if not arguments.gpuid:
            arguments.gpuid = 0
        dev = torch.device("cuda:%d" % arguments.gpuid)
        return True, dev
    else:
        dev = torch.device("cpu")
        return False, dev
def __init__(self, port, arguments):
    """
    net initial
    :param port: udp port number
    :param arguments: parse options
    """
    atexit.register(self.close)
    self.port = port
    self.args = arguments
    self.buffer_size = 1024
    self.open = False
    log.info("socket start, port:" + str(port))
    try:
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.bind = ("localhost", port)
        self.open = True
    except Exception as e:
        self.close()
        raise
def inference(self, cp_name, photo_path, cuda):
    """
    feature extractor: generate face parameters from a photo
    :param cuda: gpu speed up
    :param cp_name: checkpoint's path
    :param photo_path: input photo's path
    :return: params [1, params_cnt]
    """
    img = cv2.imread(photo_path)
    scaled = align.align_face(img, size=(64, 64))
    self.load_checkpoint(cp_name, training=False, cuda=cuda)
    img = utils.faceparsing_ndarray(scaled, self.args.parsing_checkpoint, cuda)
    img = utils.img_edge(img)
    with torch.no_grad():  # torch.no_grad must be called, not used as a bare attribute
        input = torch.from_numpy(img)
        input = input.view([1, 1, 64, 64])
        params_ = self(input)
        log.info(params_)
        return params_
def __init__(self, args):
    self.dataset = {}
    self.path_to_dataset = args.path_to_dataset
    cnt = args.db_item_cnt
    self.args = args
    if os.path.exists(self.path_to_dataset):
        name = "db_description"
        path = os.path.join(self.path_to_dataset, name)
        log.info(path)
        f = open(path, "rb")
        for it in range(cnt):
            kk = f.read(9)[1:]  # the first byte is the C# string's length prefix
            k = struct.unpack("8s", kk)[0]
            v = []
            for i in range(args.params_cnt):
                v.append(struct.unpack("f", f.read(4))[0])
            self.dataset[k] = v
        f.close()
    else:
        print("path %s not found. Skip it." % self.path_to_dataset)
def load_checkpoint(self, path, training=False, cuda=False):
    """
    restore the net from a checkpoint
    :param path: checkpoint's path
    :param training: whether to continue training after recovery
    :param cuda: gpu speedup
    """
    path_ = self.args.path_to_inference + "/" + path
    if not os.path.exists(path_):
        raise NeuralException("checkpoint of extractor does not exist at path " + path)
    if cuda:
        checkpoint = torch.load(path_)
    else:
        checkpoint = torch.load(path_, map_location='cpu')
    self.load_state_dict(checkpoint['net'])
    self.optimizer.load_state_dict(checkpoint['optimizer'])
    self.initial_step = checkpoint['epoch']
    log.info("recovery imitator from %s", path)
    if training:
        self.batch_train(cuda)
def feature256(img, checkpoint):
    """
    extract the 256-dimensional feature vector with light cnn
    :param checkpoint: lightcnn model
    :param img: input image tensor, shape: (batch, 512, 512, 3)
    :return: 256-dimensional feature vector
    """
    model = LightCNN_29Layers_v2(num_classes=80013)
    model.eval()
    model = torch.nn.DataParallel(model)  # .cuda()
    model.load_state_dict(checkpoint['state_dict'])
    transform = transforms.Compose([transforms.ToTensor()])
    if torch.is_tensor(img):
        img = img.cpu().numpy()  # the numpy ops below require an ndarray
    img = np.reshape(img, (1, 128, 128, -1))
    log.info(img)
    img = scipy.misc.imresize(arr=img, size=(128, 128))
    img = transform(img)
    input_ = torch.zeros(1, 1, 128, 128)  # `input` was undefined; LightCNN expects 1x128x128
    input_[0, :, :, :] = img
    with torch.no_grad():  # Variable(..., volatile=True) is deprecated
        _, features = model(input_)
    return features
def __init__(self, name, args, imitator=None, momentum=0.5):
    """
    feature extractor
    :param name: model name
    :param args: argparse options
    :param imitator: imitate engine's behaviour
    :param momentum: momentum for optimizer
    """
    super(Extractor, self).__init__()
    log.info("construct feature_extractor %s", name)
    self.name = name
    self.imitator = imitator
    self.initial_step = 0
    self.args = args
    self.model_path = "./output/extractor"
    self.prev_path = "./output/preview"
    self.training = False
    self.params_cnt = self.args.params_cnt
    self.dataset = None
    self.train_mode = Extractor.TRAIN_SYNC
    self.train_refer = 32
    self.net = Net(args.udp_port, args)
    self.clean()
    self.writer = SummaryWriter(comment="feature extractor", log_dir=args.path_tensor_log)
    self.model = nn.Sequential(
        nn.Conv2d(1, 4, kernel_size=7, stride=2, padding=3),             # 1. (batch, 4, 32, 32)
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),                # 2. (batch, 4, 16, 16)
        group(4, 8, kernel_size=3, stride=1, padding=1),                 # 3. (batch, 8, 16, 16)
        ResidualBlock.make_layer(8, channels=8),                         # 4. (batch, 8, 16, 16)
        group(8, 16, kernel_size=3, stride=1, padding=1),                # 5. (batch, 16, 16, 16)
        ResidualBlock.make_layer(8, channels=16),                        # 6. (batch, 16, 16, 16)
        group(16, 64, kernel_size=3, stride=1, padding=1),               # 7. (batch, 64, 16, 16)
        ResidualBlock.make_layer(8, channels=64),                        # 8. (batch, 64, 16, 16)
        group(64, self.params_cnt, kernel_size=3, stride=1, padding=1),  # 9. (batch, params_cnt, 16, 16)
        ResidualBlock.make_layer(4, channels=self.params_cnt),           # 10. (batch, params_cnt, 16, 16)
        nn.Dropout(0.5),
    )
    self.fc = nn.Linear(self.params_cnt * 16 * 16, self.params_cnt)
    self.optimizer = optim.Adam(self.parameters(), lr=args.extractor_learning_rate)
    utils.debug_parameters(self, "_extractor_")
def batch_train(self, cuda):
    log.info("feature extractor train")
    initial_step = self.initial_step
    total_steps = self.args.total_extractor_steps
    self.training = True
    self.dataset = FaceDataset(self.args, mode="train")
    rnd_input = torch.randn(self.args.batch_size, 1, 64, 64)
    if cuda:
        rnd_input = rnd_input.cuda()
    self.writer.add_graph(self, input_to_model=rnd_input)
    progress = tqdm(range(initial_step, total_steps + 1), initial=initial_step, total=total_steps)
    for step in progress:
        if self.train_mode == Extractor.TRAIN_SYNC:
            progress.set_description("sync mode ")
            names, _, images = self.dataset.get_batch(batch_size=self.args.batch_size, edge=True)
            if cuda:
                images = images.cuda()
            self.sync_train(images, names, step)
        else:
            image1, image2, name = self.dataset.get_cache(cuda)
            if image1 is None or image2 is None:
                self.change_mode(Extractor.TRAIN_SYNC)
                continue
            loss = self.asyn_train(image1, image2)
            loss_ = loss.detach().numpy()
            loss_display = loss_ * 1000
            progress.set_description("loss: {:.3f}".format(loss_display))
            self.writer.add_scalar('extractor/loss', loss_display, step)
            if step % self.args.extractor_prev_freq == 0:
                self.capture(image1, image2, name, step, cuda)
                lr = self.args.extractor_learning_rate * loss_display
                self.writer.add_scalar('extractor/learning rate', lr, step)
                utils.update_optimizer_lr(self.optimizer, lr)
        if step % self.args.extractor_save_freq == 0:
            self.save(step)
    self.writer.close()
def main(_):
    args = parser.parse_args()
    log.init("FaceNeural", logging.DEBUG, log_path="output/log.txt")
    with tf.Session() as sess:
        if args.phase == "train":
            model = Face(sess, args)
            model.train(args)
            log.info('train mode')
        elif args.phase == "inference":
            log.info("inference")
            model = Face(sess, args)
            model.inference(args)
        elif args.phase == "lightcnn":
            log.info("light cnn test")
        elif args.phase == "faceparsing":
            log.info("faceparsing")
        elif args.phase == "net":
            log.info("net start with ports (%d, %d)", 5010, 5011)
            net = Net(5010, 5011)
            while True:
                r_input = input("command: \n")  # raw_input is Python 2 only
                if r_input == "s":
                    msg = input("input: ")
                    net.only_send(msg)
                elif r_input == 'r':
                    msg = input("input: ")
                    net.send_recv(msg)
                elif r_input == "q":
                    net.only_send("quit")
                    net.close()
                    break
                else:
                    log.error("unknown code, quit")
                    net.close()
                    break
def loadckpt(self, checkpoint_dir=None):
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
    log.info("Start inference.")
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        log.info("checkpoint path: %s", ckpt.model_checkpoint_path)
        self.initial_step = int(ckpt_name.split("_")[-1].split(".")[0])
        log.info("Load checkpoint %s. Initial step: %s." % (ckpt_name, self.initial_step))
        self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
        return True
    else:
        return False
def init_loadckpt(self, to_save_dir=None, ckpt_nmbr=None):
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
    log.info("Start inference.")
    if self.load(self.checkpoint_dir, ckpt_nmbr):
        log.info(" [*] Load SUCCESS")
    else:
        if self.load(self.checkpoint_long_dir, ckpt_nmbr):
            log.info(" [*] Load SUCCESS")
        else:
            log.error(" [!] Load failed...")
    # Create folder to store results.
    if to_save_dir is None:
        to_save_dir = os.path.join(
            self.root_dir, self.model_name,
            'inference_ckpt%d_sz%d' % (self.initial_step, self.image_size))
    if not os.path.exists(to_save_dir):
        os.makedirs(to_save_dir)
    return to_save_dir
def __init__(self, path_to_art_dataset):
    self.dataset = [os.path.join(path_to_art_dataset, x) for x in os.listdir(path_to_art_dataset)]
    log.info("Art dataset contains %d images." % len(self.dataset))
        return True, dev
    else:
        dev = torch.device("cpu")
        return False, dev


if __name__ == '__main__':
    """
    program entry point
    """
    args = parser.parse_args()
    log.init("FaceNeural", logging.INFO, log_path="./output/neural_log.txt")
    cuda, device = init_device(args)
    if args.phase == "train_imitator":
        log.info('imitator train mode')
        imitator = Imitator("neural imitator", args)
        if cuda:
            imitator.cuda()
        imitator.batch_train(cuda)
    elif args.phase == "train_extractor":
        log.info('feature extractor train mode')
        extractor = Extractor("neural extractor", args)
        if cuda:
            extractor.cuda()
        extractor.batch_train(cuda)
    elif args.phase == "inference_imitator":
        log.info("inference imitator")
        imitator = Imitator("neural imitator", args, clean=False)
        if cuda:
            imitator.cuda()
def train(self, args, ckpt_nmbr=None):
    # Initialize augmentor.
    augmentor = img_augm.Augmentor(
        crop_size=[self.options.image_size, self.options.image_size],
        vertical_flip_prb=0.,
        hsv_augm_prb=1.0,
        hue_augm_shift=0.05,
        saturation_augm_shift=0.05, saturation_augm_scale=0.05,
        value_augm_shift=0.05, value_augm_scale=0.05)
    content_dataset_coco = prepare_dataset.CocoDataset(
        path_to_dataset=self.options.path_to_content_dataset)
    art_dataset = prepare_dataset.ArtDataset(
        path_to_art_dataset=self.options.path_to_art_dataset)

    # Initialize queue workers for both datasets.
    q_art = multiprocessing.Queue(maxsize=10)
    q_content = multiprocessing.Queue(maxsize=10)
    jobs = []
    for i in range(5):
        p = multiprocessing.Process(
            target=content_dataset_coco.initialize_batch_worker,
            args=(q_content, augmentor, self.batch_size, i))
        p.start()
        jobs.append(p)

        p = multiprocessing.Process(
            target=art_dataset.initialize_batch_worker,
            args=(q_art, augmentor, self.batch_size, i))
        p.start()
        jobs.append(p)
    log.info("Processes are started.")
    time.sleep(3)

    # Now initialize the graph
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
    log.info("Start training.")
    if self.load(self.checkpoint_dir, ckpt_nmbr):
        log.info(" [*] Load SUCCESS")
    else:
        if self.load(self.checkpoint_long_dir, ckpt_nmbr):
            log.info(" [*] Load SUCCESS")
        else:
            log.error(" [!] Load failed...")

    # Initial discriminator success rate.
    win_rate = args.discr_success_rate
    discr_success = args.discr_success_rate
    alpha = 0.05

    for step in tqdm(range(self.initial_step, self.options.total_steps + 1),
                     initial=self.initial_step,
                     total=self.options.total_steps):
        # Get batch from the queue with batches q, if the last is non-empty.
        while q_art.empty() or q_content.empty():
            pass
        batch_art = q_art.get()
        batch_content = q_content.get()

        if discr_success >= win_rate:
            # Train generator
            _, summary_all, gener_acc_ = self.sess.run(
                [self.g_optim_step, self.summary_merged_all, self.gener_acc],
                feed_dict={
                    self.input_painting: normalize_arr_of_imgs(batch_art['image']),
                    self.input_photo: normalize_arr_of_imgs(batch_content['image']),
                    self.lr: self.options.lr
                })
            discr_success = discr_success * (1. - alpha) + alpha * (1. - gener_acc_)
        else:
            # Train discriminator.
            _, summary_all, discr_acc_ = self.sess.run(
                [self.d_optim_step, self.summary_merged_all, self.discr_acc],
                feed_dict={
                    self.input_painting: normalize_arr_of_imgs(batch_art['image']),
                    self.input_photo: normalize_arr_of_imgs(batch_content['image']),
                    self.lr: self.options.lr
                })
            discr_success = discr_success * (1. - alpha) + alpha * discr_acc_
        self.writer.add_summary(summary_all, step * self.batch_size)

        if step % self.options.save_freq == 0 and step > self.initial_step:
            self.save(step)

        # And additionally save all checkpoints each 15000 steps.
        if step % 15000 == 0 and step > self.initial_step:
            self.save(step, is_long=True)

        if step % 500 == 0:
            output_paintings_, output_photos_ = self.sess.run(
                [self.input_painting, self.output_photo],
                feed_dict={
                    self.input_painting: normalize_arr_of_imgs(batch_art['image']),
                    self.input_photo: normalize_arr_of_imgs(batch_content['image']),
                    self.lr: self.options.lr
                })
            save_batch(
                input_painting_batch=batch_art['image'],
                input_photo_batch=batch_content['image'],
                output_painting_batch=denormalize_arr_of_imgs(output_paintings_),
                output_photo_batch=denormalize_arr_of_imgs(output_photos_),
                filepath='%s/step_%d.jpg' % (self.sample_dir, step))

    log.info("Training is finished. Terminate jobs.")
    for p in jobs:
        p.join()
        p.terminate()
    log.info("Done.")
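# For reference: the generator/discriminator switch above keeps an exponential moving average
# of accuracy and alternates which network is updated. A minimal standalone sketch of that
# control logic; the numeric values are illustrative assumptions, not project defaults.
win_rate = 0.8
alpha = 0.05
discr_success = 0.8
for acc in [0.9, 0.7, 0.4]:
    if discr_success >= win_rate:
        # generator was trained this step; acc plays the role of gener_acc_
        discr_success = discr_success * (1. - alpha) + alpha * (1. - acc)
    else:
        # discriminator was trained this step; acc plays the role of discr_acc_
        discr_success = discr_success * (1. - alpha) + alpha * acc
    print(discr_success)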
def inference_video(self, args, path_to_folder, to_save_dir=None, resize_to_original=True,
                    use_time_smooth_randomness=True, ckpt_nmbr=None):
    """
    Run inference on the video frames. Original aspect ratio will be preserved.
    Args:
        args:
        path_to_folder: path to the folder with frames from the video
        to_save_dir:
        resize_to_original:
        use_time_smooth_randomness: change the random vector which is added to the
            bottleneck features linearly over time
    Returns:
    """
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
    log.info("Start inference.")
    if self.load(self.checkpoint_dir, ckpt_nmbr):
        log.info(" [*] Load SUCCESS")
    else:
        if self.load(self.checkpoint_long_dir, ckpt_nmbr):
            log.info(" [*] Load SUCCESS")
        else:
            log.info(" [!] Load failed...")

    # Create folder to store results.
    if to_save_dir is None:
        to_save_dir = os.path.join(
            self.root_dir, self.model_name,
            'inference_ckpt%d_sz%d' % (self.initial_step, self.image_size))
    if not os.path.exists(to_save_dir):
        os.makedirs(to_save_dir)

    image_paths = sorted(os.listdir(path_to_folder))
    num_images = len(image_paths)
    for img_idx, img_name in enumerate(tqdm(image_paths)):
        img_path = os.path.join(path_to_folder, img_name)
        img = scipy.misc.imread(img_path, mode='RGB')
        img_shape = img.shape[:2]

        # Prepare image for feeding into network.
        scale_mult = self.image_size / np.min(img_shape)
        new_shape = (np.array(img_shape, dtype=float) * scale_mult).astype(int)
        img = scipy.misc.imresize(img, size=new_shape)
        img = np.expand_dims(img, axis=0)

        if use_time_smooth_randomness and img_idx == 0:
            features_delta = self.sess.run(
                self.labels_to_concatenate_to_features,
                feed_dict={self.input_photo: normalize_arr_of_imgs(img)})
            features_delta_start = features_delta + np.random.random(
                size=features_delta.shape) * 0.5 - 0.25
            features_delta_start = features_delta_start.clip(0, 1000)
            print('features_delta_start.shape=', features_delta_start.shape)
            features_delta_end = features_delta + np.random.random(
                size=features_delta.shape) * 0.5 - 0.25
            features_delta_end = features_delta_end.clip(0, 1000)
            step = (features_delta_end - features_delta_start) / (num_images - 1)

        feed_dict = {
            self.input_painting: normalize_arr_of_imgs(img),
            self.input_photo: normalize_arr_of_imgs(img),
            self.lr: self.options.lr
        }
        if use_time_smooth_randomness:
            pass

        img = self.sess.run(self.output_photo, feed_dict=feed_dict)
        img = img[0]
        img = denormalize_arr_of_imgs(img)
        if resize_to_original:
            img = scipy.misc.imresize(img, size=img_shape)
        else:
            pass
        scipy.misc.imsave(os.path.join(to_save_dir, img_name[:-4] + "_stylized.jpg"), img)
    print("Inference is finished.")
        cv2.imwrite(path_save.replace("align_", "align2_"), scaled)
        return scaled
    except Exception as e:
        log.error(e)


def clean(path):
    for root, dirs, files in os.walk(path):
        for file in files:
            if file.startswith("align"):
                path = os.path.join(root, file)
                os.remove(path)


def export(path):
    for root, dirs, files in os.walk(path):
        for file in files:
            path1 = os.path.join(root, file)
            path2 = os.path.join(root, "align_" + file)
            face_features(path1, path2)


if __name__ == '__main__':
    log.init("align")
    pwd = os.getcwd()
    project_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + ".")
    model_path = os.path.join(project_path, "neural/output/image/")
    log.info(model_path)
    clean(model_path)
    export(model_path)
""" count = len(self.losses) if count > 0: plt.style.use('seaborn-whitegrid') x = range(count) y1 = [] y2 = [] for it in self.losses: y1.append(it[0]) y2.append(it[1]) plt.plot(x, y1, color='r', label='l1') plt.plot(x, y2, color='g', label='l2') plt.ylabel("loss") plt.xlabel('step') plt.legend() path = os.path.join(self.prev_path, "loss.png") plt.savefig(path) plt.close('all') if __name__ == '__main__': import logging from parse import parser log.info("evaluation mode start") args = parser.parse_args() log.init("FaceNeural", logging.INFO, log_path="./output/evaluate.txt") evl = Evaluate(args, cuda=torch.cuda.is_available()) img = cv2.imread(args.eval_image).astype(np.float32) evl.itr_train(img)
    close connect
    """
    if self.open:
        log.warn("socket close")
        self._send('q', "-")  # quit
        self.socket.close()
        self.open = False


if __name__ == '__main__':
    from parse import parser
    import logging

    args = parser.parse_args()
    log.init("FaceNeural", logging.INFO, log_path="./output/log.txt")
    log.info(utils.curr_roleshape(args.path_to_dataset))
    net = Net(args.udp_port, args)
    while True:
        r_input = input("command: ")
        if r_input == "m":
            net.send_message("hello world")
        elif r_input == "p":
            params = utils.random_params(args.params_cnt)
            net.send_param(params, str(random.randint(1000, 9999)))
        elif r_input == "q":
            net.close()
            break
        else:
            log.error("unknown code, quit")
def __init__(self, message):
    log.info("neural error: " + message)
    self.message = "neural exception: " + message
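# Example usage, mirroring how the dataset and checkpoint loaders above report bad input;
# this assumes NeuralException subclasses Exception, as its use with `raise` elsewhere implies.
try:
    raise NeuralException("no such mode for dataset")
except NeuralException as e:
    print(e.message)  # -> "neural exception: no such mode for dataset"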