def main(_):
    """Entry point: parse CLI arguments and run Face training when requested."""
    args = parser.parse_args()
    with tf.Session() as sess:
        model = Face(sess, args)
        # Only the "train" phase is handled by this entry point.
        if args.phase != "train":
            return
        model.train(args)
        print('train mode')
def load(parsr, website, args):
    """Re-load a saved history line and merge it into a fresh argparse namespace.

    Parameters:
        parsr:   namespace from a first parse pass; ``parsr.number[0]`` selects
                 the history line to restore (negative values count from the end).
        website: unused here; kept so existing call sites keep working.
        args:    raw argv list; ``args[1:]`` is re-parsed with every default
                 cleared so user-supplied options can be told apart from defaults.

    Returns:
        The merged namespace, or None when there is no history or the requested
        line does not exist.
    """
    from parse import parser
    # Clear all defaults: after this, any attribute that is still None was NOT
    # given on the command line and may be filled from the history line.
    deflt = {key: None for key in vars(parsr)}
    parser.set_defaults(**deflt)
    par = parser.parse_args(args[1:])
    number = parsr.number[0]
    lines, nb = loadlines()
    if lines == 0:  # loadlines() signals "no history" with 0 — TODO confirm
        return
    if number < 0:
        # BUGFIX: was `nb - number`, which pushed every negative index past the
        # end (so the bounds check below always rejected it); negative values
        # must count from the end, Python-style.
        number = nb + number
    if number > nb or number < 0:
        print("This history line doesn't exist")
        # BUGFIX: bail out instead of falling through to an IndexError on
        # lines[number] below.
        return
    line = lines[number].split('\t')
    model = save_patern[1:-2].split('}\t{')
    for i in range(len(line)):
        try:
            val = save_type[i](line[i])
        except Exception:
            # BUGFIX: was a bare `except: pass`, which then assigned a stale
            # `val` from the previous iteration (or raised NameError on i == 0).
            continue
        # Only fill in values the user did not supply on the command line.
        if getattr(par, model[i], None) is None:
            setattr(par, model[i], val)
    par.nb_pages = [par.stop - par.start + 1]
    print(par)
    return par
def main(_):
    """Entry point: dispatch the Artgan model according to ``args.phase``.

    Phases: 'train', 'inference'/'test', 'inference_on_frames'/'test_on_frames',
    'export_layers', 'export_arg'. Unknown phases fall through and do nothing.
    """
    args = parser.parse_args()
    # Hard device placement (no soft fallback), but let GPU memory grow on
    # demand instead of grabbing it all up front.
    tfconfig = tf.ConfigProto(allow_soft_placement=False)
    tfconfig.gpu_options.allow_growth = True
    with tf.Session(config=tfconfig) as sess:
        model = Artgan(sess, args)
        # `phase` holds a single value, so the branches are mutually exclusive:
        # use elif instead of a chain of independent ifs.
        if args.phase == 'train':
            print("Train.")
            model.train(args, ckpt_nmbr=args.ckpt_nmbr)
        elif args.phase == 'inference' or args.phase == 'test':
            print("Inference.")
            model.inference(args, args.inference_images_dir,
                            resize_to_original=False,
                            to_save_dir=args.save_dir,
                            ckpt_nmbr=args.ckpt_nmbr)
        elif args.phase == 'inference_on_frames' or args.phase == 'test_on_frames':
            print("Inference on frames sequence.")
            model.inference_video(args,
                                  path_to_folder=args.inference_images_dir[0],
                                  resize_to_original=False,
                                  to_save_dir=args.save_dir,
                                  ckpt_nmbr=args.ckpt_nmbr)
        elif args.phase == "export_layers":
            print("export_layers.")
            model.export_layers(args.inference_images_dir,
                                to_save_dir=args.save_dir,
                                ckpt_nmbr=args.ckpt_nmbr)
        elif args.phase == "export_arg":
            print("export_arg.")
            model.export_arg(args.ckpt_name)
        # Redundant sess.close() removed: the `with` block already closes the
        # session on exit.
def main(_):
    """CLI entry: run the train / inference / lightcnn / faceparsing / net
    phases of the FaceNeural pipeline, selected by ``args.phase``."""
    args = parser.parse_args()
    log.init("FaceNeural", logging.DEBUG, log_path="output/log.txt")
    with tf.Session() as sess:
        phase = args.phase
        if phase == "train":
            Face(sess, args).train(args)
            log.info('train mode')
        elif phase == "inference":
            log.info("inference")
            Face(sess, args).inference(args)
        elif phase == "lightcnn":
            log.info("light cnn test")
        elif phase == "faceparsing":
            log.info("faceparsing")
        elif phase == "net":
            log.info("net start with ports (%d, %d)", 5010, 5011)
            net = Net(5010, 5011)
            # Interactive console: 's' = send only, 'r' = send and wait for a
            # reply, 'q' = tell the peer to quit; any other input aborts.
            while True:
                cmd = raw_input("command: \n")
                if cmd == "s":
                    net.only_send(raw_input("input: "))
                elif cmd == 'r':
                    net.send_recv(raw_input("input: "))
                elif cmd == "q":
                    net.only_send("quit")
                    net.close()
                    break
                else:
                    log.error("unknown code, quit")
                    net.close()
                    break
# NOTE(review): this chunk starts mid-definition — the `def init_device(arguments):`
# header and the opening of the `support_gpu = (...` expression lie before this
# view; the fragment below is the tail of that function. Code left untouched.
                                      and arguments.use_gpu)
    if support_gpu and arguments.use_gpu:
        # Default to GPU 0 when no id was given.
        # NOTE(review): gpuid == 0 is also falsy, so an explicit "--gpuid 0"
        # takes this branch too — harmless, it re-assigns 0.
        if not arguments.gpuid:
            arguments.gpuid = 0
        dev = torch.device("cuda:%d" % arguments.gpuid)
        # Returns (cuda_available: bool, device).
        return True, dev
    else:
        dev = torch.device("cpu")
        return False, dev


if __name__ == '__main__':
    # Program entry point ("程序入口函数"): parse args, set up logging and the
    # compute device, then dispatch on args.phase.
    """ 程序入口函数 """
    args = parser.parse_args()
    log.init("FaceNeural", logging.INFO, log_path="./output/neural_log.txt")
    cuda, device = init_device(args)
    if args.phase == "train_imitator":
        log.info('imitator train mode')
        imitator = Imitator("neural imitator", args)
        if cuda:
            # Move the model to the GPU before training.
            imitator.cuda()
        imitator.batch_train(cuda)
    elif args.phase == "train_extractor":
        log.info('feature extractor train mode')
        extractor = Extractor("neural extractor", args)
        if cuda:
            extractor.cuda()
        extractor.batch_train(cuda)