def createDir(projectname, tag):
    """Create (or locate) the model directory for *tag* under *projectname*.

    If *tag* is falsy, a fresh timestamp tag is generated, the directory is
    created, and the project's settings are copied into a new parameter file
    together with the current class list.  For an existing tag only the path
    is computed — nothing is created.

    Args:
        projectname: name of the project.
        tag: model tag, or a falsy value to create a new timestamped tag.

    Returns:
        tuple: (model directory path ending with "/", resolved tag).
    """
    if not tag:
        tag = CoachProxy.getTimeStamp()
        # Paths are built by string concatenation elsewhere in this file,
        # so keep the trailing "/" convention.  ("dir" renamed to avoid
        # shadowing the builtin.)
        model_dir = PathProxy.getModelDir(projectname) + tag + "/"
        PathProxy.mkdir(model_dir)
        setting = CNNDivSetting(projectname)
        param = CNNDivParam(projectname, tag)
        setting.copy2(param)
        CoachProxy.recordClasses(param, projectname)
    else:
        model_dir = PathProxy.getModelDir(projectname) + tag + "/"
    return model_dir, tag
def addClassDir(projectname, dirname):
    """Create the class subdirectory *dirname* in both the train and test trees."""
    # Order matters only cosmetically; train is created first, then test,
    # exactly as before.
    for base_dir in (PathProxy.getProjectTrainDir(projectname),
                     PathProxy.getProjectTestDir(projectname)):
        PathProxy.mkdir(base_dir + dirname)
def getTagNames(projectname):
    """List the tag (model-version) directory names for *projectname*.

    Returns:
        dict: ``{'success': True, 'data': [tag, ...]}`` on success,
        otherwise ``{'success': False, 'error': message}``.
    """
    rtn = {'success': False}
    if not ProjectProxy.isExists(projectname):
        # Fixed grammar of the original message ("is not exists").
        rtn['error'] = "This project does not exist"
        return rtn
    modelpath = PathProxy.getModelDir(projectname)
    # Only the top level of the model directory matters: its immediate
    # subdirectories are the tag names.
    for maindir, subdirs, _files in os.walk(modelpath):
        if maindir == modelpath:
            rtn['success'] = True
            rtn['data'] = subdirs
            return rtn
    # os.walk yielded nothing useful (e.g. the model directory is missing).
    rtn['error'] = "some unknown error"
    return rtn
def initProject(projectname, setting):
    """Create the on-disk skeleton (train/model/test) for a new project.

    Args:
        projectname: name of the project to create.
        setting: accepted for interface compatibility; currently unused here
            (the project settings file is created via CNNDivSetting).

    Returns:
        dict: ``{'success': True}`` or ``{'success': False, 'error': message}``.
    """
    rtn = {'success': False}
    proj_path = PathProxy.getProjectDir(projectname)
    if os.path.exists(proj_path):
        # Fixed grammar of the original message ("it has exists").
        rtn['error'] = "can not init project because it already exists"
        return rtn
    PathProxy.mkdir(proj_path)
    for subdir in ("train", "model", "test"):
        PathProxy.mkdir(os.path.join(proj_path, subdir))
    # cnn type here
    psetting = CNNDivSetting(projectname)
    psetting.createConfig()
    rtn['success'] = True
    return rtn
def getTagClasses(projectname, tag):
    """Read the {index: class-name} mapping stored in a tag's parameter file.

    Returns an empty dict when the parameter file does not exist.
    """
    classes = {}
    cfg_path = PathProxy.getModelParamPath(projectname, tag)
    if not os.path.exists(cfg_path):
        return classes
    cfg = Config(cfg_path)
    # Indices are stored contiguously from 0; stop at the first missing
    # entry (capped at 1000, matching the original bound).
    index = 0
    while index < 1000:
        classname = cfg.get("Classes", str(index))
        if not classname:
            break
        classes[str(index)] = classname
        index += 1
    return classes
def getClasses(projectname):
    """Derive the {index: class-name} mapping from the train directory.

    Each immediate subdirectory of the project's train directory is one
    class; string indices ("0", "1", ...) are assigned in the order
    ``os.walk`` lists the directories.

    Returns:
        dict: ``{"0": name0, "1": name1, ...}``, or ``{}`` when the train
        directory is missing or empty.
    """
    train_path = PathProxy.getProjectTrainDir(projectname)
    # Only the top level of the train directory is inspected.
    # (A dead commented-out "<mark>_<name>" parsing block was removed.)
    for maindir, subdirs, _files in os.walk(train_path):
        if maindir == train_path:
            return {str(i): name for i, name in enumerate(subdirs)}
    return {}
def testOnePicture(projectname,tag,param,image_array,image_name,BATCH_SIZE=1):
    """Classify a batch of images with the model saved under *tag*.

    Builds a small TF1 inference graph, restores the latest checkpoint from
    the tag's "saver" directory, and runs softmax classification.

    Args:
        projectname: project whose model is used.
        tag: model tag (checkpoint directory) to restore.
        param: model parameter object (classes, width/height/depth, type).
        image_array: tensor-like batch of images; assumed compatible with
            reshape to [BATCH_SIZE, width, height, depth] — TODO confirm.
        image_name: sequence of BATCH_SIZE display names, one per image.
        BATCH_SIZE: number of images in the batch (default 1).

    Returns:
        (True, [{"image", "result", "percent"}, ...]) on success, or
        (False, error_message) when the model defines no classes.
    """
    success = False
    classes = param.Classes()
    picw = param.Width()
    pich = param.Height()
    picd = param.Depth()
    group = param.GroupEnable()
    if group:
        # With grouping enabled, depth is the number of groups instead of
        # the configured raw depth.
        picd = len(param.Groups())
    N_CLASSES = len(classes)
    model_type = param.Type()
    if N_CLASSES < 1:
        error = "model has less than one classes"
        return success,error
    #with tf.Graph().as_default():
    if True:
        # Preprocess: cast, per-image standardization, then shape into the
        # NHWC batch the model expects.
        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [BATCH_SIZE, picw, pich, picd])
        logit = CoachProxy.getLogit(model_type,param,image,BATCH_SIZE)
        logit = tf.nn.softmax(logit)
        # NOTE(review): `x` is a placeholder that is fed below but does not
        # feed into `logit` (which is built from `image_array` directly) —
        # looks redundant; confirm before removing.
        x = tf.placeholder(tf.float32, shape=[BATCH_SIZE,picw, pich, picd])
        # you need to change the directories to yours.
        modelpath = PathProxy.getModelTagDir(projectname,tag)
        savedir = os.path.join(modelpath,"saver")
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(savedir)
            if ckpt and ckpt.model_checkpoint_path:
                # NOTE(review): splitting on '\\' is Windows-specific; on
                # POSIX the full path survives and the basename is wrong —
                # consider os.path.basename.
                model_name = ckpt.model_checkpoint_path.split('\\')[-1]
                global_step = model_name.split('-')[-1]
                saverpath = os.path.join(savedir,model_name)
                saver.restore(sess, saverpath)
                print('Loading success, global_step is %s' % global_step)
            else:
                # No checkpoint: inference proceeds on the randomly
                # initialized variables (original behavior kept).
                print(ckpt)
                print('No checkpoint file found')
            sess.graph.as_default()
            img = sess.run(image_array)
            prediction = sess.run(logit, feed_dict={x: img})
            result_data = []
            for i in range(BATCH_SIZE):
                # argmax over the softmax row picks the most likely class;
                # percent is its probability rounded to 2 decimals.
                mightbe = np.argmax(prediction[i])
                d = {"image":image_name[i],"result":classes[str(mightbe)],"percent":str(round(prediction[i][mightbe]*100.0,2))}
                result_data.append(d)
            success = True
    return success,result_data
from flask_restful import * #Api,Resource from flask_cors import * import multiprocessing from prx.PathProxy import PathProxy import tensorflow as tf from api.project_ import * from api.class_ import * from api.train_ import * from api.test_ import * from api.fastcnn_ import * app_path = PathProxy.app_path project_path = PathProxy.project_path PathProxy.mkdir(project_path) app_log = SimpleLog(os.path.join(app_path, "logs") + "\\") tf.logging.set_verbosity(tf.logging.ERROR) config = Config(PathProxy.getConfigPath()) app = Flask(__name__) api = Api(app) CORS(app, supports_credentials=True) api.add_resource(FastCnnApi, '/api/nn/fastcnn') api.add_resource(ProjectApi, '/api/project') api.add_resource(ClassApi, '/api/class')
def initTrain(self, projectname, tag):
    """Build the training graph for *tag*, restore any checkpoint, and start queues.

    Constructs the input pipeline, loss/train/accuracy ops and summary
    scalars for a "cnn-div" model, restores the latest checkpoint if one
    exists (otherwise creates the saver/log directories), and starts the
    TF queue runners.  State is stored on ``self`` for the training loop.

    NOTE(review): the nesting below was reconstructed from a collapsed
    source line; the exact extent of the ``if modeltype == "cnn-div"``
    block should be confirmed against the original file.

    Returns:
        bool: always True.
    """
    param = CNNDivParam(projectname, tag)
    modeltype = param.Type()
    modelpath = PathProxy.getModelTagDir(projectname, tag)
    if modeltype == "cnn-div":
        # --- training input pipeline and graph ---------------------------
        train_dir = PathProxy.getProjectTrainDir(projectname)
        train_images, train_labels = CoachProxy.getInput(
            modeltype, param, train_dir)
        train_images_batch, train_labels_batch = CoachProxy.getBatch(
            modeltype, param, train_images, train_labels)
        # Placeholders mirror the batch tensors so batches can be fed
        # explicitly during the train loop.
        x = tf.placeholder(shape=train_images_batch.shape,
                           dtype=train_images_batch.dtype,
                           name="data_batch")
        y = tf.placeholder(shape=train_labels_batch.shape,
                           dtype=train_labels_batch.dtype,
                           name="label_batch")
        train_logit = CoachProxy.getLogit(modeltype, param, x)
        train_loss = CoachProxy.getLoss(modeltype, param, train_logit, y)
        train_op = CoachProxy.getTrain(modeltype, param, train_loss)
        train_acc = CoachProxy.getEnvaluation(modeltype, param, train_logit, y)
        self.train_images_batch = train_images_batch
        self.train_labels_batch = train_labels_batch
        self.train_logit = train_logit
        self.train_loss = train_loss
        self.train_op = train_op
        self.train_acc = train_acc
        # --- optional test pipeline (only when test images exist) --------
        test_dir = PathProxy.getProjectTestDir(projectname)
        test_images, test_labels = CoachProxy.getInput(
            modeltype, param, test_dir)
        self.test_dir = test_dir
        self.test_images = test_images
        if len(test_images) > 0:
            test_images_batch, test_labels_batch = CoachProxy.getBatch(
                modeltype, param, test_images, test_labels)
            self.test_images_batch = test_images_batch
            self.test_labels_batch = test_labels_batch
    #self.summary_op = tf.summary.merge([scalar_tra_loss,scalar_tra_acc,scalar_test_loss,scalar_test_acc])
    # Summary scalars are fed through placeholders so train/test metrics
    # computed in Python can be written to TensorBoard.
    self.dict_summary = {
        "train/loss": 0.0,
        "train/accuracy": 0.0,
        "test/loss": 0.0,
        "test/accuracy": 0.0
    }
    list_merge = []
    for k in self.dict_summary:
        sh = tf.placeholder(shape=[], dtype=tf.float32, name=k)
        sc = tf.summary.scalar(k, sh)
        list_merge.append(sc)
    self.summary_op = tf.summary.merge(list_merge)
    # --- checkpoint restore / fresh start --------------------------------
    self.savedir = os.path.join(modelpath, "saver")
    self.logdir = os.path.join(modelpath, "log")
    self.saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(self.savedir)
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    self.sess.graph.as_default()
    if ckpt and ckpt.model_checkpoint_path:
        # NOTE(review): '\\' split is Windows-specific; on POSIX the
        # basename extraction would be wrong — consider os.path.basename.
        model_name = ckpt.model_checkpoint_path.split('\\')[-1]
        global_step = model_name.split('-')[-1]
        saverpath = os.path.join(self.savedir, model_name)
        self.saver.restore(self.sess, saverpath)
        self.curstep = int(global_step)
        print('Loading success, global_step is %s' % global_step)
    else:
        # First run for this tag: create the directories and start at 0.
        PathProxy.mkdir(self.savedir)
        PathProxy.mkdir(self.logdir)
        self.curstep = 0
        print("Can not load ckpt")
    # Start the input queue runners used by the batch tensors above.
    self.coord = tf.train.Coordinator()
    self.threads = tf.train.start_queue_runners(sess=self.sess, coord=self.coord)
    self.period = param.Period()
    self.saveperiod = param.SavePeriod()
    self.maxperiod = param.MaxPeriod()
    self.runflag = True
    self.writer = tf.summary.FileWriter(self.logdir, self.sess.graph)
    #print(CoachProxy.getInput(modeltype,param,train_dir))
    return True
def __init__(self):
    # Load the application-wide configuration file.
    self._config = Config(PathProxy.getConfigPath())
def __init__(self, projectname, tag):
    # Parameter file for a specific trained model tag; remember whether it
    # existed before Config opens it.
    param_path = PathProxy.getModelParamPath(projectname, tag)
    self._exists = os.path.exists(param_path)
    self._config = Config(param_path)
def __init__(self, projectname):
    # Load the per-project settings file.
    self._config = Config(PathProxy.getSettingPath(projectname))
def isExists(projectname):
    """Return True when the project's directory exists on disk."""
    return os.path.exists(PathProxy.getProjectDir(projectname))