Example #1
    def addClassDir(projectname, dirname):
        # Create a sub-directory for the new class under both the train and
        # the test directory of the project.
        train_dir = PathProxy.getProjectTrainDir(projectname)
        PathProxy.mkdir(train_dir + dirname)

        test_dir = PathProxy.getProjectTestDir(projectname)
        PathProxy.mkdir(test_dir + dirname)
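The `train_dir + dirname` concatenation only builds the intended path when the directories returned by PathProxy end in a path separator. As a minimal standalone sketch of the safer pattern, here is a hypothetical variant that uses os.path.join and plain os.makedirs in place of the PathProxy helpers (all names below are illustrative, not part of the project):

    import os

    def add_class_dir(train_root, test_root, class_name):
        # Join path components explicitly instead of relying on a trailing
        # separator in the root paths; exist_ok avoids failing on reruns.
        os.makedirs(os.path.join(train_root, class_name), exist_ok=True)
        os.makedirs(os.path.join(test_root, class_name), exist_ok=True)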
Example #2
    def getClasses(projectname):
        # Enumerate the class sub-directories directly under the project's
        # train directory and index them with sequential string keys.
        train_path = PathProxy.getProjectTrainDir(projectname)

        for maindir, subdirs, _files in os.walk(train_path):
            if maindir == train_path:
                # Only the top level matters: each sub-directory is one class.
                rtn = {}
                for p in subdirs:
                    # (An earlier variant parsed a numeric index out of
                    # "<index>_<name>" directory names; it has been disabled
                    # in favour of simple enumeration.)
                    rtn[str(len(rtn))] = p
                return rtn
        return {}
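For reference, a self-contained sketch of the same enumeration logic, using a temporary directory in place of PathProxy (the class names and the resulting mapping are purely illustrative, and os.walk does not guarantee a particular ordering):

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as train_path:
        for name in ("cats", "dogs", "birds"):
            os.mkdir(os.path.join(train_path, name))

        classes = {}
        for maindir, subdirs, _files in os.walk(train_path):
            if maindir == train_path:
                for p in subdirs:
                    classes[str(len(classes))] = p

        print(classes)  # e.g. {'0': 'cats', '1': 'dogs', '2': 'birds'}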
Example #3
    def initTrain(self, projectname, tag):
        # Build the training graph for the given project/tag, restore the
        # latest checkpoint if one exists, and set up summaries, the saver
        # and the input queue runners.
        param = CNNDivParam(projectname, tag)

        modeltype = param.Type()
        modelpath = PathProxy.getModelTagDir(projectname, tag)
        if modeltype == "cnn-div":
            # Only the cnn-div model type is wired up here.
            train_dir = PathProxy.getProjectTrainDir(projectname)
            train_images, train_labels = CoachProxy.getInput(
                modeltype, param, train_dir)

            train_images_batch, train_labels_batch = CoachProxy.getBatch(
                modeltype, param, train_images, train_labels)

            # Placeholders mirroring the shape/dtype of the batch tensors;
            # the training loop is expected to feed evaluated batches here.
            x = tf.placeholder(shape=train_images_batch.shape,
                               dtype=train_images_batch.dtype,
                               name="data_batch")
            y = tf.placeholder(shape=train_labels_batch.shape,
                               dtype=train_labels_batch.dtype,
                               name="label_batch")

            # Build the logits, the loss, the optimizer op and the evaluation
            # (accuracy) metric for the training branch via CoachProxy.
            train_logit = CoachProxy.getLogit(modeltype, param, x)

            train_loss = CoachProxy.getLoss(modeltype, param, train_logit, y)

            train_op = CoachProxy.getTrain(modeltype, param, train_loss)

            train_acc = CoachProxy.getEnvaluation(modeltype, param,
                                                  train_logit, y)
            self.train_images_batch = train_images_batch
            self.train_labels_batch = train_labels_batch
            self.train_logit = train_logit
            self.train_loss = train_loss
            self.train_op = train_op
            self.train_acc = train_acc

            # Load the held-out test set; test batch tensors are only built
            # when test images are actually present (see the check below).
            test_dir = PathProxy.getProjectTestDir(projectname)
            test_images, test_labels = CoachProxy.getInput(
                modeltype, param, test_dir)

            self.test_dir = test_dir
            self.test_images = test_images

            if len(test_images) > 0:
                test_images_batch, test_labels_batch = CoachProxy.getBatch(
                    modeltype, param, test_images, test_labels)

                self.test_images_batch = test_images_batch
                self.test_labels_batch = test_labels_batch

        # Scalar summaries are built from plain Python floats fed at run time:
        # one scalar placeholder plus a tf.summary.scalar per key below.
        self.dict_summary = {
            "train/loss": 0.0,
            "train/accuracy": 0.0,
            "test/loss": 0.0,
            "test/accuracy": 0.0
        }
        list_merge = []
        for k in self.dict_summary:
            sh = tf.placeholder(shape=[], dtype=tf.float32, name=k)
            sc = tf.summary.scalar(k, sh)
            list_merge.append(sc)
        self.summary_op = tf.summary.merge(list_merge)

        self.savedir = os.path.join(modelpath, "saver")

        self.logdir = os.path.join(modelpath, "log")

        self.saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(self.savedir)

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        if ckpt and ckpt.model_checkpoint_path:
            # Resume from the latest checkpoint; the global step is encoded
            # at the end of the checkpoint file name. os.path.basename keeps
            # this working on both Windows and POSIX paths.
            model_name = os.path.basename(ckpt.model_checkpoint_path)
            global_step = model_name.split('-')[-1]
            saverpath = os.path.join(self.savedir, model_name)
            self.saver.restore(self.sess, saverpath)
            self.curstep = int(global_step)
            print('Loading success, global_step is %s' % global_step)
        else:
            # No checkpoint yet: create the save/log directories and start fresh.
            PathProxy.mkdir(self.savedir)
            PathProxy.mkdir(self.logdir)

            self.curstep = 0
            print("No checkpoint found, starting from step 0")

        # Start the queue runners that drive the TF 1.x input pipeline.
        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(sess=self.sess,
                                                    coord=self.coord)

        self.period = param.Period()
        self.saveperiod = param.SavePeriod()
        self.maxperiod = param.MaxPeriod()

        self.runflag = True

        self.writer = tf.summary.FileWriter(self.logdir, self.sess.graph)

        return True
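How the tensors prepared by initTrain are consumed is not shown in this excerpt. Below is a hedged sketch of what a run loop might look like, assuming the cnn-div branch was taken, that the placeholders keep the names "data_batch"/"label_batch" and "train/loss" etc. given above, and that period/saveperiod/maxperiod are step counts; the method name runTrain and the loop structure are illustrative, not taken from the project:

    def runTrain(self):
        while self.runflag and self.curstep < self.maxperiod:
            # Pull one batch from the queue-runner pipeline, then feed it
            # back in through the placeholders built in initTrain.
            images, labels = self.sess.run(
                [self.train_images_batch, self.train_labels_batch])
            feed = {"data_batch:0": images, "label_batch:0": labels}

            _, loss, acc = self.sess.run(
                [self.train_op, self.train_loss, self.train_acc],
                feed_dict=feed)
            self.curstep += 1

            if self.curstep % self.period == 0:
                # Summaries are fed from plain floats, matching the per-key
                # scalar placeholders created in initTrain.
                self.dict_summary["train/loss"] = float(loss)
                self.dict_summary["train/accuracy"] = float(acc)
                summary = self.sess.run(
                    self.summary_op,
                    feed_dict={k + ":0": v
                               for k, v in self.dict_summary.items()})
                self.writer.add_summary(summary, self.curstep)

            if self.curstep % self.saveperiod == 0:
                self.saver.save(self.sess,
                                os.path.join(self.savedir, "model.ckpt"),
                                global_step=self.curstep)

        # Shut the input pipeline down cleanly.
        self.coord.request_stop()
        self.coord.join(self.threads)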