Example no. 1
import asyncio

import numpy as np
from autobahn.asyncio.wamp import ApplicationSession

from codec import Codec  # assumed local module
from model import Model  # assumed local module


class EncoderTrainer(ApplicationSession):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.batch_size = 2
        self.needStop = False
        self.codec = Codec()
        self.model = Model('train')

    async def train(self):
        self.publish('semanticaio.encoder.trainer.started')
        print('[emit] semanticaio.encoder.trainer.started')
        input_dataset = np.zeros((self.batch_size, self.codec.seq_len, self.codec.n_chars), dtype=bool)
        output_dataset = np.zeros((self.batch_size, self.codec.seq_len, self.codec.n_chars), dtype=bool)
        while not self.needStop:
            await asyncio.sleep(0.1)
            batch = await self.call('semanticaio.db.batch.get', size=self.batch_size)
            for i, question in enumerate(batch):
                self.codec.encode(question['sentence'], input_dataset[i])
                if question['correctFormId'] is None:
                    self.codec.encode(question['sentence'], output_dataset[i])
                else:
                    correctQuestion = await self.call('semanticaio.db.get', id=question['correctFormId'])
                    self.codec.encode(correctQuestion['sentence'], output_dataset[i])
            loss, accuracy = self.model.train(input_dataset, output_dataset)
            print('training:', loss, accuracy)

        self.needStop = False
        self.publish('semanticaio.encoder.trainer.stopped')
        print('[emit] semanticaio.encoder.trainer.stopped')

    def load(self, *args, **kwargs):
        print('[call] semanticaio.encoder.trainer.load')
        try:
            self.model.load()
        except Exception:
            print('[error] semanticaio.encoder.trainer.load')
        self.model.compile()

    def save(self, *args, **kwargs):
        print('[call] semanticaio.encoder.trainer.save')
        self.model.save()

    async def start(self, *args, **kwargs):
        print('[event received] semanticaio.encoder.trainer.start')
        await self.train()

    def stop(self, *args, **kwargs):
        print('[event received] semanticaio.encoder.trainer.stop')
        self.needStop = True

    async def onJoin(self, details):
        await self.register(self.load, 'semanticaio.encoder.trainer.load')
        await self.register(self.save, 'semanticaio.encoder.trainer.save')
        await self.subscribe(self.start, 'semanticaio.encoder.trainer.start')
        await self.subscribe(self.stop, 'semanticaio.encoder.trainer.stop')
        print('[encoder-trainer started]')
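
A minimal launch sketch, assuming a WAMP router (e.g. Crossbar) is reachable; the URL and realm below are placeholders, not part of the original:

from autobahn.asyncio.wamp import ApplicationRunner

if __name__ == '__main__':
    # Placeholder router URL and realm; adjust to the actual deployment
    runner = ApplicationRunner(url='ws://localhost:8080/ws', realm='realm1')
    runner.run(EncoderTrainer)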
Example no. 2
import cv2
import face_recognition

from model import Model  # assumed local module
from utils import draw_axis, normalize, resize_center_crop  # assumed local helpers


def main(opts):
    cap = cv2.VideoCapture(opts.input)

    model = Model(66, opts.size)
    model.load(opts.weights)

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('data/res.mp4', fourcc, 10, (640, 480))

    while True:
        # Stop when the stream is exhausted
        ret, img = cap.read()
        if not ret:
            break

        # Face detection (HOG by default); face_recognition expects RGB, OpenCV yields BGR
        inp_img = img[:, :, ::-1]
        face_locations = face_recognition.face_locations(inp_img)

        for (top, right, bottom, left) in face_locations:
            # Expand the bbox and shift it vertically
            bbox_width = abs(bottom - top)
            bbox_height = abs(right - left)
            left -= int(2 * bbox_width / 4)
            right += int(2 * bbox_width / 4)
            top -= int(3 * bbox_height / 4)
            bottom += int(bbox_height / 4)

            # Clamp to image bounds
            top = max(top, 0)
            left = max(left, 0)
            bottom = min(img.shape[0], bottom)
            right = min(img.shape[1], right)

            crop = img[top:bottom, left:right]

            # Resize by the shorter side, then crop from the center
            crop = resize_center_crop(crop, opts.size)

            # Normalization; wrap in a single-image batch
            normed_img = normalize(crop)
            imgs = [normed_img]

            # Prediction
            res = model.test_online(imgs)

            # Drawing
            img = draw_axis(img,
                            *res,
                            tdx=(left + right) / 2,
                            tdy=(top + bottom) / 2,
                            size=100)
            cv2.rectangle(img, (left, top), (right, bottom), (255, 0, 0), 1)

        out.write(img)
        cv2.imshow('img', img)
        if cv2.waitKey(1) == 27:  # Esc key
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()
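
Note that the writer above hardcodes a 640x480 frame size, and cv2.VideoWriter may silently produce an unusable file when written frames have a different resolution. A possible refinement (a sketch, not part of the original) is to size the writer from the capture:

# Sketch: derive the writer size from the capture instead of hardcoding (640, 480)
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter('data/res.mp4', fourcc, 10, (w, h))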
Example no. 3
from dataset import AFLW2000, split  # assumed local modules
from model import Model  # assumed local module


def main(opts):
    model = Model(66, opts.size)

    # Use pretrained weights if provided
    if opts.pretrain is not None:
        print(f'Initial weights from {opts.pretrain}')
        model.load(opts.pretrain)

    train_list, val_list = split(opts.data)

    # Training loader with augmentations (only a handful of them)
    train_dataset = AFLW2000(train_list,
                             augment=True,
                             batch_size=opts.bs,
                             input_size=opts.size)
    val_dataset = AFLW2000(val_list, batch_size=opts.bs, input_size=opts.size)

    # Train
    chkpt_name = f'model_size{opts.size}_e{opts.epoch}_lr{opts.lr:.01E}.h5'
    model.train(chkpt_name, train_dataset, val_dataset, opts.epoch)
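
The script reads its options from an opts namespace; a plausible entry point (a sketch: flag names mirror the attributes used above, the defaults are assumptions) could look like:

import argparse

if __name__ == '__main__':
    # Flag names follow the opts attributes used above; defaults are assumptions
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', required=True, help='dataset root directory')
    parser.add_argument('--size', type=int, default=64, help='network input size')
    parser.add_argument('--bs', type=int, default=16, help='batch size')
    parser.add_argument('--epoch', type=int, default=20, help='number of epochs')
    parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
    parser.add_argument('--pretrain', default=None, help='optional pretrained weights')
    main(parser.parse_args())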
Example no. 4
from time import time

import numpy as np

from dataset import AFLW2000, split  # assumed local modules
from model import Model  # assumed local module


def main(opts):
    model = Model(66, opts.size)
    model.model.summary()
    model.load(opts.weights)

    train_list, val_list = split(opts.data)
    val_dataset = AFLW2000(val_list, batch_size=1, input_size=opts.size)

    err, times = [], []
    for idx, (x, y) in enumerate(val_dataset.data_generator()):
        print(f'{idx}/{val_dataset.epoch_steps}')

        t1 = time()
        res = model.test_online(x)
        times.append(time() - t1)
        ypr = np.array(y)[:, 0, 1]  # ground-truth yaw/pitch/roll labels
        err.append(abs(ypr - res))

        # Running per-angle mean absolute error and mean inference time
        print(f'YPR: {np.mean(np.array(err), axis=0)}')
        print(f'TIME: {np.mean(times)}')
        if idx == val_dataset.epoch_steps:
            break
Example no. 5
import numpy as np
from autobahn.asyncio.wamp import ApplicationSession

from codec import Codec  # assumed local module
from model import Model  # assumed local module


class Encoder(ApplicationSession):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.codec = Codec()
        self.model = Model('encode')

    def load(self, *args, **kwargs):
        print('[call] semanticaio.encoder.load')
        try:
            self.model.load()
        except Exception:
            print('[error] semanticaio.encoder.load')
        self.model.compile()

    def _encode(self, question):
        coded_question = np.zeros((self.codec.seq_len, self.codec.n_chars), dtype=bool)
        self.codec.encode(question, coded_question)
        return self.model.encode(coded_question).tolist()

    def encode(self, *args, **kwargs):
        print('[call] semanticaio.encoder.encode:', kwargs)
        result = {}
        if 'question' in kwargs:
            result['encoded'] = self._encode(kwargs['question'])
        elif 'questions' in kwargs:
            result['encoded'] = []
            for question in kwargs['questions']:
                result['encoded'].append(self._encode(question))
        return result

    async def onJoin(self, details):
        await self.register(self.load, 'semanticaio.encoder.load')
        await self.register(self.encode, 'semanticaio.encoder.encode')
        print('[encoder started]')
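
Once joined, any session on the same realm can invoke the registered procedure; a sketch of a caller (the session object and the question text are placeholders):

# Sketch: invoking the encoder from another WAMP session on the same realm;
# 'session' and the question string are placeholders
result = await session.call('semanticaio.encoder.encode', question='where is my order?')
vector = result['encoded']  # nested Python list produced by .tolist() above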
Example no. 6
    parser.add_argument("--bank", help="--bank 名称(jianhang|renfa|nonghang)")
    parser.add_argument("--image",
                        help="--image 图片名称(位于data/validate/<bank代号>/)")
    parser.add_argument("--test", type=int, help="--test 需要测试的图片数量")

    args = parser.parse_args()
    config = Config()

    if args.bank is None:
        parser.print_help()
        exit()

    conf = config.get(args.bank)

    _model = Model()
    model = _model.load(conf)

    if model is None:
        log.error("model file [%s] does not exist", _model.model_path())
        exit()

    if args.test is None and args.image is None:
        parser.print_help()
        exit()

    if args.image is not None:
        image_path = "data/validate/" + args.bank + "/" + args.image
        image_data = cv2.imread(image_path)
        print("预测图片为:" + image_path)
        print("预测结果为:" + predict(image_data, args.image, conf, model))
        exit()
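
The --test branch is cut off in this fragment. A purely hypothetical sketch of what it might do, reusing the data/validate layout above (the directory listing and random sampling are assumptions, not the original logic):

import os
import random

# Hypothetical sketch only: the original --test branch is not shown above
if args.test is not None:
    val_dir = os.path.join("data/validate", args.bank)
    for name in random.sample(os.listdir(val_dir), args.test):
        image_data = cv2.imread(os.path.join(val_dir, name))
        print("Predicting image: " + name)
        print("Prediction result: " + predict(image_data, name, conf, model))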