Example no. 1
    def init(self):
        self.capture = cv2.VideoCapture('data/' + self.file_name + '.avi')
        self.screenshots = []
        self.timestamps = pickle.load(
            open('data/' + self.file_name + '.timestamp', 'rb'))
        self.frames = compress_pickle.load('data/' + self.file_name +
                                           '_labeled.gz')
        self.frame_id = 0
        self.auto_play = False
        self.history = History()
        [self.scalar, self.clf] = pickle.load(open('model/tap.model', 'rb'))
Example no. 2
    def train(self, save_path, name, cross_validation=True, reshape=None):
        history = History(save_path, name)

        fold_index = 0

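        # Iterate over the folds yielded by the data feed; only one split is taken when cross-validation is disabled.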
        for train_dataset, test_dataset in self.feed.take(
                self.n_splits if cross_validation else 1):
            # train_dataset is the training set and test_dataset is the test set.
            # train_dataset[0] and test_dataset[0] hold the data (4-D arrays);
            # train_dataset[1] and test_dataset[1] hold the labels (2-D arrays).
            fold_index += 1
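            # Reduce dimensionality with PCA, then classify the principal components with LDA.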
            pca_model = PCA(n_components=self.pca_components)
            lda_model = LinearDiscriminantAnalysis()

            train_data = numpy.array(train_dataset[0])
            train_label = numpy.array(train_dataset[1])
            test_data = numpy.array(test_dataset[0])
            test_label = numpy.array(test_dataset[1])

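            # Flatten each sample to a 1-D feature vector before fitting PCA.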
            pca_model.fit(train_data.reshape((train_data.shape[0], -1)))
            train_pc = pca_model.transform(
                train_data.reshape((train_data.shape[0], -1)))
            lda_model.fit(train_pc, train_label)

            # model.load_weights(os.path.join(save_path, 'test_weights.h5'))
            r = {}
            r['acc'] = lda_model.score(train_pc, train_label)
            print('acc = ' + str(r['acc']))
            test_pc = pca_model.transform(
                test_data.reshape((test_data.shape[0], -1)))
            r['val_acc'] = lda_model.score(test_pc, test_label)
            print('val_acc = ' + str(r['val_acc']))
            history.add(str(fold_index), r)

            # Save weights to a HDF5 file
            # self.model.save(self.save_path)
        history.save()
Example no. 3
    def train(self, save_path, name, cross_validation=True, reshape=None):
        history = History(save_path, name)

        fold_index = 0

        for train_dataset, test_dataset in self.feed.take(
                self.n_splits if cross_validation else 1):
            # train_dataset is the training set and test_dataset is the test set.
            # train_dataset[0] and test_dataset[0] hold the data (4-D arrays);
            # train_dataset[1] and test_dataset[1] hold the labels (2-D arrays).
            fold_index += 1
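            # Fit an SVM (scikit-learn's default RBF kernel) directly on the flattened samples.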
            svm_model = SVC()

            train_data = numpy.array(train_dataset[0])
            train_label = numpy.array(train_dataset[1])
            test_data = numpy.array(test_dataset[0])
            test_label = numpy.array(test_dataset[1])

            svm_model.fit(train_data.reshape((train_data.shape[0], -1)),
                          train_label)

            # model.load_weights(os.path.join(save_path, 'test_weights.h5'))
            r = {}
            r['acc'] = svm_model.score(
                train_data.reshape((train_data.shape[0], -1)), train_label)
            print('acc = ' + str(r['acc']))
            r['val_acc'] = svm_model.score(
                test_data.reshape((test_data.shape[0], -1)), test_label)
            print('val_acc = ' + str(r['val_acc']))
            history.add(str(fold_index), r)

            # Save weights to a HDF5 file
            # self.model.save(self.save_path)
        history.save()
Example no. 4
    def train(self, save_path, name, cross_validation=True):
        history = History(save_path, name)

        fold_index = 0
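        # Choose the optimizer API that matches the installed TensorFlow version.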
        if version.parse(tf.__version__) < version.parse('1.13'):
            self.compile(optimizer=tf.train.AdamOptimizer(
                learning_rate=self.learning_rate),
                         loss='categorical_crossentropy',
                         metrics=['accuracy'])
        else:
            self.compile(
                optimizer=tf.keras.optimizers.Adam(lr=self.learning_rate),
                loss='categorical_crossentropy',
                metrics=['accuracy'])

        for train_dataset, test_dataset in self.feed.take(
                self.n_splits if cross_validation else 1):
            # train_dataset is the training set and test_dataset is the test set.
            # train_dataset[0] and test_dataset[0] hold the data (4-D arrays);
            # train_dataset[1] and test_dataset[1] hold the labels (2-D arrays).

            fold_index += 1

            checkpoint_file = os.path.join(
                save_path, 'cp_' + '{:02d}'.format(fold_index) +
                '{epoch:03d}_{val_loss:.4f}.h5')

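            # Build tf.data pipelines for the fold: repeat, shuffle, unbatch the fold tensors, and re-batch to the configured batch size.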
            train_dataset = tf.data.Dataset.from_tensors(
                train_dataset).repeat().shuffle(1024).apply(
                    tf.data.experimental.unbatch()).batch(self.batch_size)
            test_dataset = tf.data.Dataset.from_tensors(
                test_dataset).repeat().shuffle(1024).apply(
                    tf.data.experimental.unbatch()).batch(self.batch_size)

            print('lr = ' + str(self.learning_rate))
            print('batch_size = ' + str(self.batch_size))

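            # Checkpoint the weights after every epoch; the filename encodes the fold index, epoch, and validation loss.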
            ckpt = tf.keras.callbacks.ModelCheckpoint(checkpoint_file,
                                                      verbose=1,
                                                      save_weights_only=True,
                                                      period=1)

            # model.load_weights(os.path.join(save_path, 'test_weights.h5'))
            hist = self.fit(
                train_dataset,
                epochs=self.training_epoch,
                callbacks=[
                    ckpt,
                    tf.keras.callbacks.TensorBoard(log_dir=save_path)
                ],
                steps_per_epoch=self.epoch_steps,
                validation_data=test_dataset,
                validation_steps=self.test_steps)
            history.add(str(fold_index), dict(hist.history))

            # Save weights to a HDF5 file
            self.save(save_path)

        history.save()
Example no. 5
import pickle
import compress_pickle
from PIL import ImageGrab
from board import Board
import random
from my_keyboard import MyKeyboard
import multiprocessing
from data_manager import DataManager
from train import History
import pygame
from pynput.keyboard import Key, Controller
from layout import Layout

if __name__ == "__main__":
    board = Board()
    keyboard = MyKeyboard()
    history = History()
    [scalar, clf] = pickle.load(open('model/tap.model', 'rb'))
    labels = [None for i in range(20)]
    controller = Controller()
    qwerty = Layout()

    pygame.mixer.init(22050, -16, 2, 64)
    pygame.init()
    sound = pygame.mixer.Sound("sound/type.wav")
    sound.set_volume(1.0)

    while True:
        if keyboard.is_pressed_down('Esc'):
            break

        frame = board.getNewFrame()
Example no. 6
class Replay():
    SCREENSHOT_W = 980
    SCREENSHOT_H = 540

    def __init__(self, file_name):
        self.file_name = file_name
        self.init()
        thread = threading.Thread(target=self._run)
        thread.start()

    def _run(self):
        cv2.namedWindow('screenshot')
        cv2.setMouseCallback('screenshot', self._screenshotMouseCallback)
        cv2.namedWindow('frame')
        cv2.setMouseCallback('frame', self._frameMouseCallback)

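        # Display loop: render the screenshot and the current frame until stop() clears the flag.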
        self.is_running = True
        while self.is_running:
            self._renderScreenshot()
            self.frames[self.frame_id].output()
            if self.auto_play:
                self.incFrame()

    def _renderScreenshot(self):
        W = Replay.SCREENSHOT_W
        H = Replay.SCREENSHOT_H
        screenshot = self._getScreenshot(self.frames[self.frame_id].timestamp)
        screenshot = cv2.resize(screenshot, (W, H))
        cv2.line(screenshot, (0, int(0.9 * H)), (W - 1, int(0.9 * H)),
                 (192, 192, 192), 3)
        schedule = float(self.frame_id) / len(self.frames)
        cv2.rectangle(screenshot, (int(schedule * W - 2), int(0.9 * H - 10)),
                      (int(schedule * W + 2), int(0.9 * H + 10)),
                      (255, 255, 255), -1)
        cv2.imshow('screenshot', screenshot)

    def _getScreenshot(self, timestamp):
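        # Find the first recorded timestamp after the requested one, decoding and caching video frames lazily up to that index.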
        for index in range(len(self.timestamps)):
            if timestamp < self.timestamps[index]:
                break
        while index >= len(self.screenshots):
            succ, image = self.capture.read()
            self.screenshots.append(image)
        image = self.screenshots[index]
        return image

    def _screenshotMouseCallback(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN or flags == cv2.EVENT_FLAG_LBUTTON:
            x = float(x) / (Replay.SCREENSHOT_W)
            y = float(y) / (Replay.SCREENSHOT_H)
            if 0.8 <= y <= 1.0 and 0.0 <= x <= 1.0:
                self.frame_id = int(x * (len(self.frames) - 1))

    def _frameMouseCallback(self, event, x, y, flags, param):
        label = None
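        # Map mouse buttons to labels: left -> 1, right -> 0, middle -> -1.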
        if event == cv2.EVENT_LBUTTONDOWN:
            label = 1
        if event == cv2.EVENT_RBUTTONDOWN:
            label = 0
        if event == cv2.EVENT_MBUTTONDOWN:
            label = -1

        if label is not None:
            frame = self.frames[self.frame_id]
            (R, C) = frame.force_array.shape
            x = float(x) / (C * FrameData.MAGNI)
            y = float(y) / (R * FrameData.MAGNI)

            DIST_THRESHOLD = ((10**2) / (R * C))**0.5
            min_dist = DIST_THRESHOLD
            target = -1
            for contact in frame.contacts:
                dist = ((x - contact.x)**2 + (y - contact.y)**2)**0.5
                if dist < min_dist:
                    min_dist = dist
                    target = contact.id

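            # Propagate the chosen label to the same contact id in earlier and later frames, stopping at frames where the contact's state is 1 (backwards) or 3 (forwards).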
            if target != -1:
                for i in range(self.frame_id, -1, -1):
                    flag = False
                    for contact in self.frames[i].contacts:
                        if contact.id == target:
                            contact.label = label
                            flag = (contact.state != 1)
                    if not flag:
                        break

                for i in range(self.frame_id, len(self.frames)):
                    flag = False
                    for contact in self.frames[i].contacts:
                        if contact.id == target:
                            contact.label = label
                            flag = (contact.state != 3)
                    if not flag:
                        break

    def init(self):
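        # (Re)load the screen recording, timestamps, labelled frames, and the pre-trained tap model.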
        self.capture = cv2.VideoCapture('data/' + self.file_name + '.avi')
        self.screenshots = []
        self.timestamps = pickle.load(
            open('data/' + self.file_name + '.timestamp', 'rb'))
        self.frames = compress_pickle.load('data/' + self.file_name +
                                           '_labeled.gz')
        self.frame_id = 0
        self.auto_play = False
        self.history = History()
        [self.scalar, self.clf] = pickle.load(open('model/tap.model', 'rb'))

    def stop(self):
        self.is_running = False
        file_path = 'data/' + self.file_name + '_checked.gz'
        DataManager().judgeFileExistance(file_path)
        compress_pickle.dump(self.frames, file_path)
        for frame in self.frames:
            frame.force_array = None
        pickle.dump(self.frames,
                    open('data/' + self.file_name + '.simple', 'wb'))

    def incFrame(self):
        if self.frame_id + 1 < len(self.frames):
            frame = self.frames[self.frame_id]
            self.history.updateFrame(frame)
            key_contacts = self.history.getKeyContact(frame)
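            # Re-check key contacts with the tap classifier; pause auto-play when a prediction disagrees with the stored label.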
            for contact in key_contacts:
                feature = self.scalar.transform([contact.feature])[0]
                pred = self.clf.predict([feature])[0]
                if pred != contact.label:
                    print(time.perf_counter())
                    self.auto_play = False

            self.frame_id += 1

    def decFrame(self):
        if self.frame_id - 1 >= 0:
            self.frame_id -= 1

    def setAutoPlay(self):
        self.auto_play ^= True