Exemplo n.º 1
0
    def setup(self, m=2, r=2, window_size=20, batch_size=1, hidden_layer_sizes=None, pretrain_step=20):
        """Create the TestBed and data generator, then pre-fill the sliding window.

        :param m: dimensionality of the generated signal (and TestBed input)
        :param r: passed through to TestBed as ``r``
        :param window_size: number of samples held in the TestBed window
        :param batch_size: training batch size for the TestBed
        :param hidden_layer_sizes: list of hidden-layer widths; defaults to [10]
            (None sentinel avoids the shared-mutable-default pitfall)
        :param pretrain_step: pretrain every this many iterations (used by run())
        """
        if hidden_layer_sizes is None:
            hidden_layer_sizes = [10]
        self.bed = TestBed(
            m=m, r=r, window_size=window_size, batch_size=batch_size, hidden_layers_sizes=hidden_layer_sizes
        )
        self.gen = SimpleGenerator(num=m)
        self.pretrain_step = pretrain_step

        # Fill the window with data so the first predictions have full context.
        for i in xrange(window_size):
            y = self.gen.next()
            self.bed.supply(y)
Exemplo n.º 2
0
class Worker(QtCore.QThread):
    """Background thread driving an online predict/train loop.

    Emits ``started`` when the loop begins, ``updated`` after each iteration,
    and ``stopped`` when the loop exits. ``setup()`` and ``setLearningParams()``
    / ``setDelay()`` must be called before ``run()``, which reads the
    attributes they create.
    """

    started = QtCore.Signal()
    updated = QtCore.Signal()
    stopped = QtCore.Signal()

    def __init__(self, vis, parent=None):
        """Store the visualizer and prepare thread state.

        :param vis: visualizer object; receives (y, y_pred) via ``append()``
        :param parent: optional Qt parent object
        """
        super(Worker, self).__init__(parent)
        self.bed = None  # TestBed, created in setup()
        self.gen = None  # SimpleGenerator, created in setup()

        self.vis = vis
        self.stop_flg = False
        # Guards stop_flg, which is written from the GUI thread (stop())
        # and read from this worker thread (run()).
        self.mutex = QtCore.QMutex()

    def setup(self, m=2, r=2, window_size=20, batch_size=1, hidden_layer_sizes=None, pretrain_step=20):
        """Create the TestBed and data generator, then pre-fill the sliding window.

        :param hidden_layer_sizes: list of hidden-layer widths; defaults to [10]
            (None sentinel avoids the shared-mutable-default pitfall)
        :param pretrain_step: pretrain every this many iterations in run()
        """
        if hidden_layer_sizes is None:
            hidden_layer_sizes = [10]
        self.bed = TestBed(
            m=m, r=r, window_size=window_size, batch_size=batch_size, hidden_layers_sizes=hidden_layer_sizes
        )
        self.gen = SimpleGenerator(num=m)
        self.pretrain_step = pretrain_step

        # Fill the window with data so the first predictions have full context.
        for i in xrange(window_size):
            y = self.gen.next()
            self.bed.supply(y)

    def setGeneratorParams(self, k, n):
        """Forward generator parameters; no-op until setup() has run."""
        if self.gen is not None:
            self.gen.setK(k)
            self.gen.setN(n)

    def setDelay(self, delay):
        """Set the per-iteration sleep (seconds) used by run()."""
        self.delay = delay

    def setLearningParams(self, params):
        """Unpack the learning-rate/epoch settings read by run().

        :param params: dict with keys 'pretrain_epochs', 'pretrain_lr',
            'finetune_epochs', 'finetune_lr'
        """
        self.pretrain_epochs = params["pretrain_epochs"]
        self.pretrain_lr = params["pretrain_lr"]
        self.finetune_epochs = params["finetune_epochs"]
        self.finetune_lr = params["finetune_lr"]

    def stop(self):
        """Request the run() loop to exit after its current iteration."""
        with QtCore.QMutexLocker(self.mutex):
            self.stop_flg = True

    def run(self):
        """Main loop: periodically pretrain, then predict/finetune each sample."""
        with QtCore.QMutexLocker(self.mutex):
            self.stop_flg = False
        self.started.emit()

        for i, y in enumerate(self.gen):
            if i % self.pretrain_step == 0:
                # pretrain
                avg_cost = self.bed.pretrain(self.pretrain_epochs, pretraining_lr=self.pretrain_lr)
                print("   pretrain cost: {}".format(avg_cost))

            # predict before supplying y so the model has not yet seen it
            y_pred = self.bed.predict()
            print("{}: y={}, y_pred={}".format(i, y, y_pred))
            self.vis.append(y, y_pred)

            # finetune on the newly observed sample
            self.bed.supply(y)
            avg_cost = self.bed.finetune(self.finetune_epochs, finetunning_lr=self.finetune_lr)
            print("   train cost: {}".format(avg_cost))
            time.sleep(self.delay)

            self.updated.emit()

            # Read the stop flag under the same mutex stop() writes it with.
            with QtCore.QMutexLocker(self.mutex):
                should_stop = self.stop_flg
            if should_stop:
                print(" --- iteration end ---")
                break

        self.stopped.emit()