def addClassDir(projectname, dirname):
    """Create a class sub-directory in both the train and test trees of a project.

    Args:
        projectname: Name of the project whose directory tree is extended.
        dirname: Name of the new class directory (appended verbatim to the
            train/test directory paths, so it is expected that those paths
            already end with a separator -- see PathProxy usage elsewhere).
    """
    train_dir = PathProxy.getProjectTrainDir(projectname)
    PathProxy.mkdir(train_dir + dirname)
    test_dir = PathProxy.getProjectTestDir(projectname)
    PathProxy.mkdir(test_dir + dirname)
def createDir(projectname, tag):
    """Resolve (and, for a new tag, create) the model directory for a training run.

    When ``tag`` is falsy a fresh timestamp tag is generated, the model
    directory is created, and the project's division settings are copied
    into the run's parameter file along with the class list. When ``tag``
    is given, the existing directory path is simply computed.

    Args:
        projectname: Name of the project.
        tag: Existing run tag, or a falsy value to start a new run.

    Returns:
        Tuple ``(model_dir, tag)`` where ``model_dir`` ends with ``"/"``.
    """
    if not tag:
        tag = CoachProxy.getTimeStamp()
        # New run: materialize the directory and snapshot the settings.
        model_dir = PathProxy.getModelDir(projectname) + tag + "/"
        PathProxy.mkdir(model_dir)
        setting = CNNDivSetting(projectname)
        param = CNNDivParam(projectname, tag)
        setting.copy2(param)
        CoachProxy.recordClasses(param, projectname)
    else:
        # Existing run: only compute the path; nothing is created.
        model_dir = PathProxy.getModelDir(projectname) + tag + "/"
    return model_dir, tag
def initProject(projectname, setting):
    """Create the on-disk skeleton (train/model/test dirs) for a new project.

    Args:
        projectname: Name of the project to create.
        setting: Incoming settings payload (currently unused here; the
            config is created from project defaults via CNNDivSetting).

    Returns:
        Dict with ``success`` (bool) and, on failure, an ``error`` message.
        Fails without touching the filesystem if the project already exists.
    """
    rtn = {'success': False}
    proj_path = PathProxy.getProjectDir(projectname)
    if os.path.exists(proj_path):
        rtn['error'] = "cannot init project because it already exists"
        return rtn
    PathProxy.mkdir(proj_path)
    PathProxy.mkdir(os.path.join(proj_path, "train"))
    PathProxy.mkdir(os.path.join(proj_path, "model"))
    PathProxy.mkdir(os.path.join(proj_path, "test"))
    # cnn type here: write the default CNN-division config for the project.
    psetting = CNNDivSetting(projectname)
    psetting.createConfig()
    rtn['success'] = True
    return rtn
from flask_restful import * #Api,Resource from flask_cors import * import multiprocessing from prx.PathProxy import PathProxy import tensorflow as tf from api.project_ import * from api.class_ import * from api.train_ import * from api.test_ import * from api.fastcnn_ import * app_path = PathProxy.app_path project_path = PathProxy.project_path PathProxy.mkdir(project_path) app_log = SimpleLog(os.path.join(app_path, "logs") + "\\") tf.logging.set_verbosity(tf.logging.ERROR) config = Config(PathProxy.getConfigPath()) app = Flask(__name__) api = Api(app) CORS(app, supports_credentials=True) api.add_resource(FastCnnApi, '/api/nn/fastcnn') api.add_resource(ProjectApi, '/api/project') api.add_resource(ClassApi, '/api/class')
def initTrain(self, projectname, tag):
    """Build the TF1 training graph, summaries, and session for a run.

    Constructs the input/batch pipeline, loss/train/accuracy ops, summary
    scalars, a Saver, and a Session; restores from the latest checkpoint
    under the model tag's "saver" directory when one exists, otherwise
    creates fresh saver/log directories and starts from step 0. All
    resulting handles are stored on ``self`` for the training loop.

    Args:
        projectname: Name of the project to train.
        tag: Model-run tag identifying the parameter/checkpoint directory.

    Returns:
        True. NOTE(review): the original source is collapsed onto one line,
        so the exact indentation is lost; graph construction is taken to be
        guarded by the ``modeltype == "cnn-div"`` check -- confirm whether
        other model types were meant to fall through.
    """
    param = CNNDivParam(projectname, tag)
    modeltype = param.Type()
    modelpath = PathProxy.getModelTagDir(projectname, tag)
    if modeltype == "cnn-div":
        # --- training input pipeline and graph ---
        train_dir = PathProxy.getProjectTrainDir(projectname)
        train_images, train_labels = CoachProxy.getInput(
            modeltype, param, train_dir)
        train_images_batch, train_labels_batch = CoachProxy.getBatch(
            modeltype, param, train_images, train_labels)
        # Placeholders mirror the batch tensors' shape/dtype so batches can
        # be fed back in explicitly during the training loop.
        x = tf.placeholder(shape=train_images_batch.shape,
                           dtype=train_images_batch.dtype,
                           name="data_batch")
        y = tf.placeholder(shape=train_labels_batch.shape,
                           dtype=train_labels_batch.dtype,
                           name="label_batch")
        train_logit = CoachProxy.getLogit(modeltype, param, x)
        train_loss = CoachProxy.getLoss(modeltype, param, train_logit, y)
        train_op = CoachProxy.getTrain(modeltype, param, train_loss)
        train_acc = CoachProxy.getEnvaluation(modeltype, param, train_logit, y)
        self.train_images_batch = train_images_batch
        self.train_labels_batch = train_labels_batch
        self.train_logit = train_logit
        self.train_loss = train_loss
        self.train_op = train_op
        self.train_acc = train_acc
        # --- test input pipeline (only batched when test data exists) ---
        test_dir = PathProxy.getProjectTestDir(projectname)
        test_images, test_labels = CoachProxy.getInput(
            modeltype, param, test_dir)
        self.test_dir = test_dir
        self.test_images = test_images
        if len(test_images) > 0:
            test_images_batch, test_labels_batch = CoachProxy.getBatch(
                modeltype, param, test_images, test_labels)
            self.test_images_batch = test_images_batch
            self.test_labels_batch = test_labels_batch
        #self.summary_op = tf.summary.merge([scalar_tra_loss,scalar_tra_acc,scalar_test_loss,scalar_test_acc])
        # Summary values are fed through placeholders named after these keys
        # (the dict's values here are only initial fillers).
        self.dict_summary = {
            "train/loss": 0.0,
            "train/accuracy": 0.0,
            "test/loss": 0.0,
            "test/accuracy": 0.0
        }
        list_merge = []
        for k in self.dict_summary:
            sh = tf.placeholder(shape=[], dtype=tf.float32, name=k)
            sc = tf.summary.scalar(k, sh)
            list_merge.append(sc)
        self.summary_op = tf.summary.merge(list_merge)
        # --- session, checkpoint restore, and bookkeeping ---
        self.savedir = os.path.join(modelpath, "saver")
        self.logdir = os.path.join(modelpath, "log")
        self.saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(self.savedir)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.sess.graph.as_default()
        if ckpt and ckpt.model_checkpoint_path:
            # Checkpoint names end in "-<global_step>"; recover the step so
            # training resumes where it left off.
            # NOTE(review): splitting on '\\' assumes Windows checkpoint
            # paths -- confirm deployment target.
            model_name = ckpt.model_checkpoint_path.split('\\')[-1]
            global_step = model_name.split('-')[-1]
            saverpath = os.path.join(self.savedir, model_name)
            self.saver.restore(self.sess, saverpath)
            self.curstep = int(global_step)
            print('Loading success, global_step is %s' % global_step)
        else:
            # No checkpoint yet: create the saver/log dirs and start fresh.
            PathProxy.mkdir(self.savedir)
            PathProxy.mkdir(self.logdir)
            self.curstep = 0
            print("Can not load ckpt")
        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(sess=self.sess,
                                                    coord=self.coord)
        self.period = param.Period()
        self.saveperiod = param.SavePeriod()
        self.maxperiod = param.MaxPeriod()
        self.runflag = True
        self.writer = tf.summary.FileWriter(self.logdir, self.sess.graph)
        #print(CoachProxy.getInput(modeltype,param,train_dir))
    return True