Example 1
    def fit(self, inputs, target, epochs):
        for i in range(epochs):
            with self.train():
                value = self.forward(inputs)
            loss = rm.mean_squared_error(value, target)
            loss.grad().update(rm.Adam(lr=0.001))
        return loss
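
Note that this fit method constructs a fresh rm.Adam on every iteration, so the
optimizer's moment estimates are discarded at each step. A minimal variant that
keeps a single optimizer instance for the whole loop (same assumed model class
as above; only the optimizer handling differs):

    def fit(self, inputs, target, epochs):
        opt = rm.Adam(lr=0.001)  # created once, so Adam's running averages persist
        for i in range(epochs):
            with self.train():
                value = self.forward(inputs)
            loss = rm.mean_squared_error(value, target)
            loss.grad().update(opt)
        return loss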
Example 2
    def __init__(self,
                 class_map=None,
                 imsize=(224, 224),
                 load_pretrained_weight=False,
                 train_whole_network=False):

        # make int into array
        if isinstance(imsize, int):
            imsize = (imsize, imsize)
        assert (imsize[0] / 32.) % 1 == 0 and (imsize[1] / 32.) % 1 == 0, \
            "TernausNet only accepts 'imsize' arguments that are multiples of 32. \
              ex: imsize=(320, 320)"

        self.decay_rate = 0
        self._opt = rm.Adam(1e-4)
        self._model = CNN_TernausNet(1)

        super(TernausNet, self).__init__(class_map,
                                         imsize,
                                         load_pretrained_weight,
                                         train_whole_network,
                                         load_target=self._model)
        self._model.final._channel = self.num_class
        self._freeze()
Example 3
    rm.LeakyRelu(),
    #rm.BatchNormalize(),
    rm.Dense(28 * 28),
    rm.Sigmoid()
])

ae = AAE(enc_base,
         dec,
         batch_size,
         latent_dim=latent_dim,
         hidden=200,
         prior=model_dist,
         mode=model_type,
         label_dim=10)

dis_opt = rm.Adam(lr=0.0005, b=0.5)
enc_opt = rm.Adam(lr=0.0005, b=0.5)

N = len(x_train)
curve = []
for e in range(epoch):
    if not train:
        continue
    perm = permutation(N)
    batch_history = []
    k = 1
    for offset in range(0, N, batch_size):
        idx = perm[offset:offset + batch_size]
        s = time()
        train_data = x_train[idx]
        with ae.train():
Example 4
    #rm.LeakyRelu(),
    rm.Dense(28 * 28),
    rm.Sigmoid()
])

    def forward(self, x, eps=1e-3):
        nb = len(x)
        self.z_mu = self.enc(x)
        self.decd = self.dec(self.z_mu)
        self.reconE = rm.mean_squared_error(self.decd, x)
        return self.decd


ae = AE(latent_dim=latent_dim)

ae_opt = rm.Adam()

N = len(x_train)
curve = []
for e in range(epoch):
    perm = permutation(N)
    batch_loss = []
    for offset in range(0, N, batch_size):
        idx = perm[offset:offset + batch_size]
        s = time()
        with ae.train():
            ae(x_train[idx])
            l = ae.reconE
        s = time() - s
        l.grad().update(ae_opt)
        batch_loss.append([l, s])
Example 5
x_train = x_train * 2 - 1
x_test = x_test * 2 - 1

set_cuda_active(True)
seed(10)

latent_dim = 200
epoch = 30
batch_size = 256

gen = Gen(latent_dim=latent_dim, batch_normal=True)
dis = Dis()
dcgan = DCGAN(gen, dis)

GAN_dis_opt = rm.Adam()
GAN_gen_opt = rm.Adam()

N = len(x_train)
curve = []
for e in range(epoch):
    perm = permutation(N)
    batch_loss = []
    real = []
    fake = []
    for b, offset in enumerate(range(0, N, batch_size)):
        idx = perm[offset:offset + batch_size]
        s = time()
        with dcgan.train():
            l = dcgan(x_train[idx])
        s = time() - s
Example 6
        history = self._epoch(opt, x_train, x_test, y_train, y_test)


if __name__ == '__main__':
    data = np.load('data/mnist.npy')
    y_train = data[0][0]
    x_train = data[0][1].astype('float32') / 255.
    y_test = data[1][0]
    x_test = data[1][1].astype('float32') / 255.
    x_train = x_train.reshape(-1, 28 * 28)
    x_test = x_test.reshape(-1, 28 * 28)
    random.seed(10)
    latent_dim = 2
    epoch = 20
    batch_size = 256
    opt = rm.Adam()
    ae = network((batch_size, 28 * 28),
                 epoch=epoch,
                 latent_dim=latent_dim,
                 lr_ch=(10, 1.2))
    ae.train(opt, x_train, x_test, y_train, y_test)

    _, z_train, xz_train = ae.mini_batch(opt, x_train, inference=True)
    f = Mahalanobis(z_train, y_train)
    _, z_test, xz_test = ae.mini_batch(opt, x_test, inference=True)
    f.set_th(0.9998)
    pred = np.argmin(f.predict(z_test), 1)
    print(confusion_matrix(y_test, pred))
    print(classification_report(y_test, pred))
"""
(py3) yamagishiyouheinoMacBook-Pro-2:vae_classifier yamagishi$ python src/network.py
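
The Mahalanobis helper used above is not shown in this snippet; presumably it fits a
per-class mean and covariance on the training codes z_train and scores test codes by
their Mahalanobis distance to each class, so np.argmin(..., 1) picks the nearest
class. A rough standalone sketch of that scoring (an assumption about the helper's
behaviour, not its actual implementation):

import numpy as np

def mahalanobis_scores(z_train, y_train, z_test):
    # Returns an (n_test, n_classes) matrix of distances; smaller means closer.
    classes = np.unique(y_train)
    scores = np.empty((len(z_test), len(classes)))
    for k, c in enumerate(classes):
        zc = z_train[y_train == c]
        mu = zc.mean(axis=0)
        cov_inv = np.linalg.pinv(np.cov(zc, rowvar=False))
        diff = z_test - mu
        scores[:, k] = np.sqrt(np.einsum('ij,jk,ik->i', diff, cov_inv, diff))
    return scores

# pred = np.argmin(mahalanobis_scores(z_train, y_train, z_test), 1)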
Example 7
        rm.Dense(hidden),
        rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(hidden, initializer=Uniform()),
        rm.Relu()
    ])
    enc = Enc(enc_pre, latent_dim)
    dec = rm.Sequential([
        rm.Dense(hidden),
        rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(28 * 28),
        rm.Sigmoid(),
    ])
vae = VAE(enc, dec, latent_dim)
optimizer = rm.Adam()

N = len(x_train)
history = []
for e in range(epoch):
    perm = permutation(N)
    batch_history = []
    vae.set_models(inference=False)
    for offset in range(0, N, batch_size):
        idx = perm[offset:offset + batch_size]
        s = time()
        tmp = x_train[idx]
        train_data = np.zeros(batch_shape)
        train_data[:len(tmp)] = tmp
        with vae.train():
            vae(train_data)
Example 8
    def __init__(self,
                 env,
                 actor_network,
                 critic_network,
                 loss_func=None,
                 actor_optimizer=None,
                 critic_optimizer=None,
                 gamma=0.99,
                 tau=0.001,
                 buffer_size=1e6,
                 logger=None):
        super(DDPG, self).__init__()
        if loss_func is None:
            loss_func = rm.MeanSquaredError()
        if actor_optimizer is None:
            actor_optimizer = rm.Adam(0.0001)
        if critic_optimizer is None:
            critic_optimizer = rm.Adam(0.001)

        self._actor = actor_network
        self._target_actor = copy.deepcopy(self._actor)
        self._critic = critic_network
        self._target_critic = copy.deepcopy(self._critic)
        self.env = env
        self.loss_func = loss_func
        self._actor_optimizer = actor_optimizer
        self._critic_optimizer = critic_optimizer
        self.buffer_size = buffer_size
        self.gamma = gamma
        self.tau = tau

        if isinstance(env, BaseEnv):
            action_shape = env.action_shape
            state_shape = env.state_shape
            if not hasattr(action_shape, "__getitem__"):
                action_shape = (action_shape, )
            if not hasattr(state_shape, "__getitem__"):
                state_shape = (state_shape, )
        else:
            raise Exception("Argument env must be a object of BaseEnv class.")

        # Check env object
        # Check sample method.
        if isinstance(env, BaseEnv):
            sample = self.env.sample()
        else:
            raise Exception("Argument env must be a object of BaseEnv class.")

        assert isinstance(sample, np.ndarray), \
            "Sampled action from env object must be numpy ndarray. Actual is {}".format(
                type(sample))

        # Check state and action shape
        assert state_shape == self.env.reset().shape, \
            "Expected state shape is {}. Actual is {}.".format(state_shape, self.env.reset().shape)
        action_sample = self._actor(np.zeros((1, *state_shape))).as_ndarray()
        assert action_sample.shape[1:] == action_shape, \
            "Expected state shape is {}. Actual is {}.".format(
                action_shape, action_sample.shape[1:])
        #####
        self.action_size = action_shape
        self.state_size = state_shape
        self._buffer = ReplayBuffer(self.action_size, self.state_size,
                                    buffer_size)
        self._initialize()

        # logger
        logger = DDPGLogger() if logger is None else logger
        assert isinstance(logger,
                          Logger), "Argument logger must be Logger class"
        logger._key_check(log_key=_ddpg_keys, log_key_epoch=_ddpg_keys_epoch)
        self.logger = logger
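
The tau argument and the deep-copied target networks above exist for the usual DDPG
soft target update, theta_target <- tau * theta + (1 - tau) * theta_target,
presumably applied inside the _initialize()/update() methods that are not shown here.
A minimal numpy sketch of that rule (the parameter dictionaries are stand-ins for
illustration, not the actual ReNom API):

import numpy as np

def soft_update(target_params, source_params, tau=0.001):
    # theta_target <- tau * theta_source + (1 - tau) * theta_target
    for name, src in source_params.items():
        target_params[name] = tau * src + (1.0 - tau) * target_params[name]

target = {"w": np.zeros((3, 3))}
source = {"w": np.ones((3, 3))}
soft_update(target, source, tau=0.001)
print(target["w"][0, 0])  # 0.001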
Example 9
    dec = EncoderDecoder(latent_dimension, (2, 2), units=6,
                         depth=3)  #, batch_normal=True)
else:
    enc = DenseNet(2, (2, latent_dimension),
                   units=20,
                   growth_rate=20,
                   depth=4,
                   dropout=True)
    dec = DenseNet(latent_dimension, (2, 2),
                   units=20,
                   growth_rate=20,
                   depth=4,
                   dropout=True)
vae = Vae(enc, dec)

optimizer = rm.Adam()  #Sgd(lr=0.01, momentum=0.)
plt.clf()
epoch_splits = 10
epoch_period = epoch // epoch_splits
fig, ax = plt.subplots(epoch_splits, 3, figsize=(16, epoch_splits * 8))
if batch_size == 'Full':
    batch_size = len(x_train)

curve = []
neighbor_period = []
for e in range(epoch):
    s = time()
    perm = np.random.permutation(len(x_train))
    batch_loss = []
    for i in range(0, len(x_train), batch_size):
        idx = perm[i:i + batch_size]
Example 10
            i += 1
            hidden = rm.tanh(hidden)
            hidden = layers[i](hidden)
            i += 1
            if self.dropout:
                hidden = rm.dropout(hidden)
            hidden = rm.concat(main_stream, hidden)
        #print(hidden.shape)
        return self.output(hidden)

# growth_rate = 2
# depth = 8
# mymodel performs 1 epoch @ 0.30 sec
# mymodel_recursive performs 1 epoch @ 0.22 sec
func_model = mymodel_recursive(1, 1, growth_rate=2, depth=4, dropout=False)
optimizer = rm.Adam()#Sgd(lr=0.2, momentum=0.6)
plt.clf()
epoch_splits = 10
epoch_period = epoch // epoch_splits
fig, ax = plt.subplots(epoch_splits, 2, figsize=(8, epoch_splits * 4))
if batch_size == 'Full':
    batch_size = len(train_x)

curve = [[], []]
neighbor_period = []
for e in range(epoch):
    s = time()
    perm = np.random.permutation(len(train_x))
    batch_loss = []
    for i in range(0, len(train_x), batch_size):
Example 11
                         batch_normal=True,
                         dropout=True)
else:
    enc = DenseNet(2, (2, latent_dimension),
                   units=200,
                   growth_rate=100,
                   depth=4,
                   dropout=True)
    dec = DenseNet(latent_dimension, (2, 2),
                   units=200,
                   growth_rate=100,
                   depth=4,
                   dropout=True)
vae = Vae(enc, dec)

optimizer = rm.Adam()  #Sgd(lr=0.1, momentum=.4)
plt.clf()
epoch_splits = 10
epoch_period = epoch // epoch_splits
fig, ax = plt.subplots(epoch_splits, 3, figsize=(16, epoch_splits * 8))
if batch_size == 'Full':
    batch_size = len(train_x)

curve = []
neighbor_period = []
for e in range(epoch):
    s = time()
    perm = np.random.permutation(len(train_x))
    batch_loss = []
    for i in range(0, len(train_x), batch_size):
        idx = perm[i:i + batch_size]
Example 12
    def train(self,
              env,
              loss_func=rm.ClippedMeanSquaredError(),
              optimizer_critic=rm.Adam(lr=0.0001),
              optimizer_actor=rm.Adam(lr=0.0001),
              episode=100,
              batch_size=32,
              random_step=1000,
              one_episode_step=5000,
              test_step=1000,
              test_env=None,
              update_period=10000,
              greedy_step=1000000,
              min_greedy=0.1,
              max_greedy=0.9,
              exploration_rate=1.,
              test_greedy=0.95,
              callbacks=None):

        greedy = min_greedy
        g_step = (max_greedy - min_greedy) / greedy_step

        if test_env is None:
            test_env = env

        print("Execute random action for %d step..." % random_step)
        for r in range(random_step):
            action = np.random.rand(*self._action_size)
            prestate, action, reward, state, terminal = env(action)
            if prestate is not None:
                self._buffer.store(prestate, np.array(action),
                                   np.array(reward), state, np.array(terminal))
        state = None
        prestate = None
        count = 0
        for e in range(episode):
            loss = 0
            tq = tqdm(range(one_episode_step))
            for j in range(one_episode_step):
                action = np.atleast_2d(self.action(state[None, ...])) + \
                    np.random.randn(batch_size, self._action_size) * (1 - greedy) * exploration_rate
                prestate, action, reward, state, terminal = env(action)
                greedy += g_step
                greedy = np.clip(greedy, min_greedy, max_greedy)
                if prestate is not None:
                    self._buffer.store(prestate, np.array(action),
                                       np.array(reward), state,
                                       np.array(terminal))

                # Training
                train_prestate, train_action, train_reward, train_state, train_terminal = \
                    self._buffer.get_minibatch(batch_size)

                target = np.zeros((batch_size, self._action_size),
                                  dtype=state.dtype)
                for i in range(batch_size):
                    target[i, train_action[
                        i, 0].astype(np.integer)] = train_reward[i]

                self._target_actor.set_models(inference=True)
                self._target_critic.set_models(inference=True)
                action_state_value = self._target_critic(
                    train_state, self._target_actor(train_state))
                target += (action_state_value * self._ganma *
                           (~train_terminal[:, None])).as_ndarray()

                self._actor.set_models(inference=True)
                self._critic.set_models(inference=False)
                with self._critic.train():
                    z = self._critic(train_prestate,
                                     self._actor(train_prestate))
                    ls = loss_func(z, target)

                with self._actor.prevent_update():
                    ls.grad().update(optimizer_critic)

                self._actor.set_models(inference=True)
                self._critic.set_models(inference=False)
                with self._critic.train():
                    z = self._critic(train_prestate,
                                     self._actor(train_prestate))

                with self._actor.prevent_update():
                    z.grad(-1.).update(optimizer_actor)

                loss += ls.as_ndarray()
                if count % update_period == 0:
                    self.update()
                    count = 0
                count += 1
                tq.set_description("episode {:03d} loss:{:6.4f}".format(
                    e, float(ls.as_ndarray())))
                tq.update(1)
            tq.set_description("episode {:03d} avg loss:{:6.4f}".format(
                e,
                float(loss) / (j + 1)))
            tq.update(0)
            tq.refresh()
            tq.close()

            # Test
            state = None
            sum_reward = 0

            for j in range(test_step):
                if state is not None:
                    action = self.action(state) +\
                        np.random.randn(batch_size, self._action_size) * \
                        (1 - test_greedy) * exploration_rate
                prestate, action, reward, state, terminal = test_env(action)
                sum_reward += float(reward)

            tq.write("    /// Result")
            tq.write("    Average train error:{:1.6f}".format(
                float(loss) / one_episode_step))
            tq.write("    Test reward:{}".format(sum_reward))
            tq.write("    Greedy:{:1.4f}".format(greedy))
            tq.write("    Buffer:{}".format(len(self._buffer)))

            if isinstance(callbacks, dict):
                func = callbacks.get("end_episode", False)
                if func:
                    func()

            sleep(0.25)  # This is for jupyter notebook representation.
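
For reference, the greedy/g_step arithmetic in this train loop is a linear
exploration schedule: greedy starts at min_greedy and grows by
(max_greedy - min_greedy) / greedy_step per environment step until it saturates at
max_greedy. With the default arguments shown above:

import numpy as np

min_greedy, max_greedy, greedy_step = 0.1, 0.9, 1000000
g_step = (max_greedy - min_greedy) / greedy_step

steps = np.array([0, 250000, 500000, 1000000, 2000000])
print(np.clip(min_greedy + g_step * steps, min_greedy, max_greedy))
# [0.1 0.3 0.5 0.9 0.9]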
Example 13
    rm.Dense(28 * 28),
    rm.Sigmoid()
])

ae = AAE(
    enc_base,
    dec,
    batch_size,
    mode='clustering',
    prior='normal',
    label_dim=10,
    latent_dim=latent_dim,
    hidden=200,
)

dis_opt = rm.Adam(lr=0.001, b=0.5)
enc_opt = rm.Adam(lr=0.001, b=0.5)
cds_opt = rm.Adam(lr=0.001, b=0.9)

outdir = 'result/{}/{}'.format(target, model_id)
if not path.exists(outdir):
    makedirs(outdir)

N = len(x_train)
curve = []
for e in range(epoch):
    if not train:
        continue
    perm = permutation(N)
    batch_history = []
    k = 1
Example 14
    #rm.BatchNormalize(),
    rm.Dense(28 * 28),
    rm.Sigmoid()
])

ae = AAE(
    enc_base,
    dec,
    batch_size,
    latent_dim=latent_dim,
    hidden=hidden,
    prior=model_dist,
    mode=model_type,
)

dist_opt = rm.Adam(lr=0.001, b=0.5)
opt = rm.Adam(lr=0.001, b=0.5)

N = len(x_train)
batch_shape = (batch_size, 784)
history = []
for e in range(1, epoch + 1):
    perm = permutation(N)
    batch_history = []
    for offset in range(0, N, batch_size):
        idx = perm[offset:offset + batch_size]
        start_time = time()
        train_data = np.zeros(batch_shape)
        tmp = x_train[idx]
        train_data[:len(tmp)] = tmp
        with ae.train():
Example 15
dis = rm.Sequential([
    #rm.Dense(hidden), #rm.LeakyRelu(),
    #rm.BatchNormalize(),
    rm.Dense(hidden), #rm.LeakyRelu(),
    rm.Dense(1), rm.Sigmoid()
])


gan = GAN(gen, dis, latent_dim=gan_dim)
initial_lr = 0.001
last_lr = initial_lr/1000
_b = .5 # default is .9
_c = 0.5 # center
_m = 0.2 # margin
down_rate = 1.02
gen_opt = rm.Adam(lr=initial_lr, b=_b)
dis_opt = rm.Adam(lr=initial_lr, b=_b)
gen_lr = np.linspace(initial_lr, last_lr, epoch // shot_period + 1)
dis_lr = np.linspace(initial_lr, last_lr, epoch // shot_period + 1)

N = len(x_train)
batch_shape = batch_size, series
history = []
_train = np.zeros(batch_shape)
for e in range(epoch):
    code = '|'
    if 0:#e % shot_freq == shot_freq - 1:
        _gen_lr = gen_lr[e//shot_freq]
        _dis_lr = dis_lr[e//shot_freq]
        print("###{}/{}###".format(_gen_lr, _dis_lr))
        gen_opt._lr = _gen_lr