Example 1
    def test_value_manipulation(self):
        val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)

        # get_value
        valth = KTH.get_value(xth)
        valtf = KTF.get_value(xtf)
        assert valtf.shape == valth.shape
        assert_allclose(valth, valtf, atol=1e-05)

        # set_value
        val = np.random.random((4, 2))
        KTH.set_value(xth, val)
        KTF.set_value(xtf, val)

        valth = KTH.get_value(xth)
        valtf = KTF.get_value(xtf)
        assert valtf.shape == valth.shape
        assert_allclose(valth, valtf, atol=1e-05)

        # count_params
        assert KTH.count_params(xth) == KTF.count_params(xtf)

        # print_tensor
        check_single_tensor_operation('print_tensor', ())
        check_single_tensor_operation('print_tensor', (2,))
        check_single_tensor_operation('print_tensor', (4, 3))
        check_single_tensor_operation('print_tensor', (1, 2, 3))

        val = np.random.random((3, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)
        assert KTH.get_variable_shape(xth) == KTF.get_variable_shape(xtf)
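
The test above exercises the backend value API symmetrically on the Theano (KTH) and TensorFlow (KTF) backends. A minimal standalone sketch of the same calls against a single backend, assuming only a working Keras installation, looks like this:

import numpy as np
from keras import backend as K

# Create a backend variable from a numpy array.
x = K.variable(np.random.random((4, 2)))

# Read the current value back as a numpy array.
print(K.get_value(x).shape)   # (4, 2)

# Overwrite the variable in place with a new array of the same shape.
K.set_value(x, np.zeros((4, 2)))
print(K.get_value(x).sum())   # 0.0

# Number of scalar elements held by the variable.
print(K.count_params(x))      # 8
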
Example 2
    def on_epoch_end(self, epoch, logs=None):
        K.set_value(self.w_cla,
                    (-self.sigmoid[epoch] + 1) * self.scale_c[epoch])
        K.set_value(self.w_dec, (self.sigmoid[epoch]) * self.scale_d[epoch])

        self.w_decs.append(K.get_value(self.w_dec))
        self.w_clas.append(K.get_value(self.w_cla))
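
This callback assumes that w_cla and w_dec are backend variables the model's loss already refers to, so that only their values change between epochs. A sketch of how such variables might be created and used in the loss; the names and the sigmoid/scale schedules are hypothetical, not taken from the excerpt:

import numpy as np
from keras import backend as K

# Hypothetical setup: two loss weights shared between the compiled loss and the callback.
w_cla = K.variable(1.0)   # weight of the classification term
w_dec = K.variable(0.0)   # weight of the decoder/reconstruction term

def weighted_loss(cls_loss, dec_loss):
    # The symbolic loss reads the variables; the callback rewrites them each epoch
    # with K.set_value, exactly as in the example above.
    return w_cla * cls_loss + w_dec * dec_loss
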
Example 3
 def on_epoch_end(self, epoch, logs=None):
     if epoch > self.klstart:  # grows linearly towards one
         new_weight = min(
             K.get_value(self.kl_weight) + (1 / self.kl_annealtime), 1.)
         print(new_weight)
         K.set_value(self.kl_weight, new_weight)
     print("Current KL Weight is " + str(K.get_value(self.kl_weight)))
Example 4
 def on_epoch_end(self, epoch, logs=None):
     if epoch > self.klstart:  # grows linearly towards max_kl_weight
         new_weight = min(
             K.get_value(self.kl_weight) +
             (self.max_kl_weight / self.kl_annealtime), self.max_kl_weight)
         #print(new_weight)
         K.set_value(self.kl_weight, new_weight)
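
Both warm-up callbacks above presuppose a kl_weight backend variable that appears in the model's loss. A self-contained sketch of that pattern, with hypothetical attribute names mirroring the excerpts (klstart, kl_annealtime, max_kl_weight), might look like this:

import keras.backend as K
from keras.callbacks import Callback

class KLWeightScheduler(Callback):
    """Linearly anneal a KL weight after a warm-up delay (hypothetical sketch)."""

    def __init__(self, kl_weight, klstart=2, kl_annealtime=10, max_kl_weight=1.0):
        super(KLWeightScheduler, self).__init__()
        self.kl_weight = kl_weight          # a K.variable referenced inside the loss
        self.klstart = klstart              # epochs to wait before annealing
        self.kl_annealtime = kl_annealtime  # epochs over which to ramp up
        self.max_kl_weight = max_kl_weight  # final value of the weight

    def on_epoch_end(self, epoch, logs=None):
        if epoch > self.klstart:
            new_weight = min(
                K.get_value(self.kl_weight)
                + self.max_kl_weight / self.kl_annealtime,
                self.max_kl_weight)
            K.set_value(self.kl_weight, new_weight)

kl_weight = K.variable(0.0)  # referenced by the (not shown) VAE loss
# model.fit(..., callbacks=[KLWeightScheduler(kl_weight)])
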
Example 5
    def reset_states(self, states_value=None):
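        """Reset the recurrent states, either to the given values or to the configured initializers."""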
        if len(self.states) == 0:
            return
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        if not hasattr(self, 'states') or self.states[0] is None:
            state_shapes = list(map(K.int_shape, self.model.input[1:]))
            self.states = list(map(K.zeros, state_shapes))

        if states_value is not None:
            if not isinstance(states_value, (list, tuple)):
                states_value = [states_value] * len(self.states)
            assert len(states_value) == len(self.states), (
                'Your RNN has ' + str(len(self.states)) +
                ' states, but was provided ' + str(len(states_value)) +
                ' state values.')
            if not isinstance(states_value[0], np.ndarray):
                states_value = list(map(np.array, states_value))
            if states_value[0].shape == tuple():
                for state, val in zip(self.states, states_value):
                    K.set_value(state, K.get_value(state) * 0. + val)
            else:
                for state, val in zip(self.states, states_value):
                    K.set_value(state, val)
        else:
            if self.state_initializer:
                for state, init in zip(self.states, self.state_initializer):
                    if isinstance(init, initializers.Zeros):
                        K.set_value(state, 0 * K.get_value(state))
                    else:
                        K.set_value(state,
                                    K.eval(init(K.get_value(state).shape)))
            else:
                for state in self.states:
                    K.set_value(state, 0 * K.get_value(state))
Example 6
    def _build_models(self, hp_lambda=1., lr=0.001):

        # Input images from both domains
        img = Input(shape=self.img_shape)

        self.e = self._build_extracter()
        self.c = self._build_classifier()
        self.d = self._build_discriminator()

        f = self.e(img)
        gradInv = self.gradInv = GradientReversal(hp_lambda=hp_lambda)
        K.set_value(gradInv.hp_lambda, hp_lambda)
        fInv = gradInv(f)

        cls = self.c(f)
        dom = self.d(fInv)

        self.model = Model(inputs=img, outputs=cls, name="model")
        self.compile(self.model, lr, name='classifier')

        self.classifier = Model(inputs=img, outputs=cls, name="classifier")
        self.compile(self.classifier, lr, name='classifier')

        self.discriminator = Model(inputs=img, outputs=dom, name="discriminator")
        self.compile(self.discriminator, lr * 0.1, name='discriminator')
Example 7
    def on_epoch_end(self, epoch, logs=None):
        tau = np.mod(epoch, int(self.T / self.M)) * 1.0 / int(self.T / self.M)
        if tau >= self.R:
            new_weight = self.max_kl_weight  # capped, because posterior collapse happens otherwise!
        else:
            new_weight = tau * self.max_kl_weight

        K.set_value(self.kl_weight, new_weight)
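
The tau expression above implements a cyclical schedule: within each cycle of T/M epochs, tau ramps linearly from 0 to 1, and the weight is held at its maximum once tau reaches R. A tiny numeric check of that schedule with hypothetical values T=40, M=4, R=0.5:

import numpy as np

T, M, R, max_kl_weight = 40, 4, 0.5, 2e-4  # hypothetical values
cycle = int(T / M)                         # 10 epochs per cycle
for epoch in range(cycle):
    tau = np.mod(epoch, cycle) * 1.0 / cycle
    w = max_kl_weight if tau >= R else tau * max_kl_weight
    print(epoch, round(tau, 2), w)
# Epochs 0-4 ramp the weight up linearly; epochs 5-9 hold it at max_kl_weight,
# and the pattern repeats every cycle.
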
Example 8
    def on_epoch_end(self, epoch, logs=None):
        tau = np.mod(epoch, int(self.T / self.M)) * 1.0 / int(self.T / self.M)
        if tau >= self.R:
            new_weight = 1.0 / 5000  # capped at 0.0002, because posterior collapse happens!
        else:
            new_weight = tau / 5000

        K.set_value(self.kl_weight, new_weight)
        print("Current KL Weight is " + str(K.get_value(self.kl_weight)))
Example 9
 def on_batch_begin(self, batch, logs=None):
     lr = cosine_decay_with_warmup(global_step=self.global_step,
                                   learning_rate_base=self.learning_rate_base,
                                   total_steps=self.total_steps,
                                   warmup_learning_rate=self.warmup_learning_rate,
                                   warmup_steps=self.warmup_steps,
                                   hold_base_rate_steps=self.hold_base_rate_steps)
     K.set_value(self.model.optimizer.lr, lr)
     if self.verbose > 0:
         print('\nBatch %05d: setting learning '
               'rate to %s.' % (self.global_step + 1, lr))
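
cosine_decay_with_warmup is not shown in the excerpt; a minimal implementation of the usual schedule (linear warm-up, optional hold, then cosine decay to zero) might look like the following. The signature mirrors the call above, but the body is an assumption rather than the original helper.

import numpy as np

def cosine_decay_with_warmup(global_step, learning_rate_base, total_steps,
                             warmup_learning_rate=0.0, warmup_steps=0,
                             hold_base_rate_steps=0):
    # Linear warm-up from warmup_learning_rate to learning_rate_base.
    if global_step < warmup_steps:
        slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
        return warmup_learning_rate + slope * global_step
    # Optionally hold the base rate for a few steps after warm-up.
    if global_step < warmup_steps + hold_base_rate_steps:
        return learning_rate_base
    # Cosine decay from learning_rate_base down to 0 over the remaining steps.
    progress = (global_step - warmup_steps - hold_base_rate_steps) / float(
        total_steps - warmup_steps - hold_base_rate_steps)
    return 0.5 * learning_rate_base * (1.0 + np.cos(np.pi * progress))
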
Example 10
    def test_value_manipulation(self):
        val = np.random.random((4, 2))
        xth = KTH.variable(val)
        xtf = KTF.variable(val)

        # get_value
        valth = KTH.get_value(xth)
        valtf = KTF.get_value(xtf)
        assert valtf.shape == valth.shape
        assert_allclose(valth, valtf, atol=1e-05)

        # set_value
        val = np.random.random((4, 2))
        KTH.set_value(xth, val)
        KTF.set_value(xtf, val)

        valth = KTH.get_value(xth)
        valtf = KTF.get_value(xtf)
        assert valtf.shape == valth.shape
        assert_allclose(valth, valtf, atol=1e-05)

        # count_params
        assert KTH.count_params(xth) == KTF.count_params(xtf)
Example 11
    def on_epoch_end(self, epoch, logs=None):

        if (self.current <= (self.l_period / 2)):
            self.current = self.current + 1
            K.set_value(self.w_cla, 0)
            K.set_value(self.w_dec, 1)
        else:
            self.current = self.current + 1
            K.set_value(self.w_cla, 1)
            K.set_value(self.w_dec, 0)

        if (self.current == self.l_period):
            self.current = 0

        self.w_decs.append(K.get_value(self.w_dec))
        self.w_clas.append(K.get_value(self.w_cla))
Example 12
    def train(self, batch_size=16, train_Steps=100, val_Steps=5, nEpochs=100):
        gen_S_train = DataGenerator(self.n_classes, self.img_res, self.st_path, batch_size)
        gen_S_val = DataGenerator(self.n_classes, self.img_res, self.sv_path, batch_size)
        gen_T_train = DataGenerator(self.n_classes, self.img_res, self.tt_path, batch_size)
        gen_T_val = DataGenerator(self.n_classes, self.img_res, self.tv_path, batch_size)

        gen_S_train.on_epoch_end()
        gen_T_train.on_epoch_end()

        prev = 0
        for epoch in range(nEpochs):
            # setting parameters
            K.set_value(self.gradInv.hp_lambda, self._hp_scheduler(epoch, nEpochs))
            lr = self._lr_scheduler(epoch, nEpochs)
            self.compile(self.classifier, lr, name='classifier')
            self.compile(self.discriminator, lr*0.1, name='discriminator')

            # train
            res_S_train = np.array([0., 0.])
            res_d_train = np.asarray([0., 0.])
            for batch in tqdm.tqdm(range(train_Steps)):
                xSt, ySt = gen_S_train.next()
                lSt = self.classifier.train_on_batch(xSt, ySt)
                res_S_train += np.asarray(lSt)

                xTt, _ = gen_T_train.next()
                dSt = np.zeros((batch_size))
                dTt = np.ones((batch_size))

                ldt = self.discriminator.train_on_batch(np.concatenate((xSt, xTt)), np.concatenate((dSt, dTt)))
                res_d_train += np.asarray(ldt)

            los_S_train, acc_S_train = res_S_train / train_Steps
            los_d_train, acc_d_train = res_d_train / train_Steps

            # valid
            res_S_val = np.array([0., 0.])
            res_T_val = np.asarray([0., 0.])
            res_d_val = np.asarray([0., 0.])
            for batch in range(val_Steps):
                xSv, ySv = gen_S_val.next()
                lSv = self.classifier.test_on_batch(xSv, ySv)
                res_S_val += np.asarray(lSv)

                xTv, yTv = gen_T_val.next()
                lTv = self.classifier.test_on_batch(xTv, yTv)
                res_T_val += np.asarray(lTv)

                dSv = np.zeros((batch_size))
                dTv = np.ones((batch_size))

                ld = self.discriminator.test_on_batch(np.concatenate((xSv, xTv)), np.concatenate((dSv, dTv)))
                res_d_val += np.asarray(ld)

            los_S_val, acc_S_val = res_S_val / val_Steps
            los_T_val, acc_T_val = res_T_val / val_Steps
            los_d_val, acc_d_val = res_d_val / val_Steps

            gen_S_val.on_epoch_end()
            gen_T_val.on_epoch_end()

            val_lr_lambda = K.get_value(self.classifier.optimizer.lr)
            val_hp_lambda = K.get_value(self.gradInv.hp_lambda)

            print()
            print("[Epoch %d, lr=%.1e, lambda=%.1e]" % (epoch, val_lr_lambda, val_hp_lambda))
            print("<Train> [Dom loss: %.2f, acc: %3d%%] [Cls(S) loss: %.2f, acc: %3d%%]"
                  % (los_d_train, acc_d_train*100.,
                     los_S_train, acc_S_train*100.,))
            print("<Val>   [Dom loss: %.2f, acc: %3d%%] [Cls(S) loss: %.2f, acc: %3d%%] [Cls(T) loss: %.2f, acc: %3d%%]"
                  % (los_d_val, acc_d_val*100.,
                     los_S_val, acc_S_val*100.,
                     los_T_val, acc_T_val*100.,))

            # save weights when target accuracy reaches a new best
            if acc_T_val >= prev:
                self.classifier.save_weights(os.path.join(self.output_name, 'dann_acc-{:.2f}_loss-{:.2f}.hdf5'.format(acc_T_val, los_T_val)))
                prev = acc_T_val

            # draw graph
            outlines = []
            outlines.append(epoch)
            outlines.append(val_lr_lambda)
            outlines.append(val_hp_lambda)

            outlines.append(los_d_train)
            outlines.append(acc_d_train*100.)
            outlines.append(los_S_train)
            outlines.append(acc_S_train*100.)
            outlines.append(los_d_val)
            outlines.append(acc_d_val*100.)
            outlines.append(los_S_val)
            outlines.append(acc_S_val*100.)
            outlines.append(los_T_val)
            outlines.append(acc_T_val*100.)

            self.output_file.write(",".join([str(x) for x in outlines])+"\n")
            self.output_file.flush()

            d = pd.read_csv(self.output_name+"/progress.csv")
            if d.shape[0] == 1:
                continue
            d = d.interpolate()
            p = d.plot(x="epoch", y=["acc_d_v", "acc_c_v_s", "acc_c_v_t"])
            fig = p.get_figure()
            fig.savefig(self.output_name+"/graph.png")
            plt.close()
Example 13
 def on_epoch_end(self, epoch, logs=None):
     new_beta = max(K.get_value(self.beta) * self.decay, self.min_beta)
     K.set_value(self.beta, new_beta)
     print("Current beta is " + str(K.get_value(self.beta)))
Example 14
agent = DQNAgent(env, max_eps=1, period=0, state_mode=DQNAgent.StateModel.VN_ONLY, gamma=0.8, model=create_model_10(env))
hist = agent.train()

hist


# In[13]:


plt.plot([x['reward'] for x in hist[0]])


# In[16]:


K.set_value(agent.model.optimizer.lr, 0.001)
hist = agent.train()


# In[17]:


plt.plot([x['reward'] for x in hist[0]])


# In[20]:


K.set_value(agent.model.optimizer.lr, 0.0001)
hist = agent.train()
Example 15
if load_checkpoint is not None:
    fm.loadModel(model, load_checkpoint)
    last_checkpoint = load_checkpoint
    model.reset_states()
    model.evaluate(XX_validation[:preamble],
                   YY_validation[:preamble],
                   batch_size=nbatch,
                   verbose=0)
    current_loss = model.evaluate(XX_validation[preamble:],
                                  YY_validation[preamble:],
                                  batch_size=nbatch,
                                  verbose=0)
    print "Loaded weights from %s: Learning rate = %.8f, Validation loss = %08.6f" % (
        load_checkpoint, learning_rate, current_loss)

KTF.set_value(model.optimizer.lr, learning_rate)

try:

    for iii in range(0, 100000):

        XXs, YYs = generateXYChunk(X, nbatch, epoch_timesteps, maxlen, length)

        model.reset_states()
        model.evaluate(XXs[:preamble],
                       YYs[:preamble],
                       batch_size=nbatch,
                       verbose=0)
        model.fit(XXs[preamble:],
                  YYs[preamble:],
                  batch_size=nbatch,