Example #1
import logging
import os

import numpy as np

from scipy.ndimage.measurements import label
from forkan.common.config_manager import ConfigManager
from forkan.datasets import load_dataset
from forkan.common.utils import create_dir
"""
For every position of object 1, the activation heatmap over every position of object 2 is computed
and saved as an image. Positions where the objects overlap are assigned an activation of -1.
"""

logger = logging.getLogger(__name__)

save_path = os.environ['HOME'] + '/.keras/forkan/figures/duo_heat/'
create_dir(save_path)

cm = ConfigManager()
model = cm.restore_model('vae-duo', with_dataset=False)
dataset = load_dataset('translation')

# we only want the training set
x_train = dataset[0]

# get image size
image_size = x_train.shape[1]

# reshape for better indexing
x_train = np.reshape(x_train, [3, 32, 32, image_size, image_size])

i = 0
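The listing is cut off after the reshape. Purely as a hedged sketch of the loop the docstring describes, not the project's actual code, the double iteration over object positions could look like the following; `activation_for` is a hypothetical stand-in for whatever activation the restored model provides, and `matplotlib` stands in for the original plotting:

import numpy as np
import matplotlib.pyplot as plt

def activation_for(pos1, pos2):
    # hypothetical placeholder for the model-based activation of object 2
    # at pos2 while object 1 sits at pos1
    return float(np.random.rand())

for x1 in range(32):
    for y1 in range(32):
        heat = np.zeros((32, 32))
        for x2 in range(32):
            for y2 in range(32):
                # overlapping positions are assigned -1 as activation
                heat[x2, y2] = -1 if (x1, y1) == (x2, y2) else activation_for((x1, y1), (x2, y2))
        plt.imsave('heat_{}_{}.png'.format(x1, y1), heat)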
Example #2
File: __init__.py  Project: tik0/forkan
            'level': logging.DEBUG
        }
    },
    root={
        'handlers': ['h'],
        'level': logging.DEBUG,
    },
)

# config for coloredlogs
field_styles = coloredlogs.DEFAULT_FIELD_STYLES
fmt = '%(asctime)s [%(levelname)-8s] %(name)-4s %(message)s'
datefmt = '%H:%M'

# suppress matplotlib debug bloat
logging.getLogger('matplotlib').setLevel(logging.WARNING)

dictConfig(logging_config)
logger = logging.getLogger(__name__)

coloredlogs.install(level='DEBUG', fmt=fmt, datefmt=datefmt)

for d in [weights_path, dataset_path, figure_path]:
    create_dir(d)

# set numpy seed
if fixed_seed:
    import numpy as np
    np.random.seed(0)
    logger.critical("Starting in fixed seed mode!")
Example #3
    # center sphere
    cr.arc(0, 0, 1, 0, 2 * np.pi)
    cr.set_source_rgb(0, 0, 0)
    cr.fill()

    # reshape, delete fourth (alpha) channel, greyscale and normalise
    return np.expand_dims(
        np.dot(
            np.frombuffer(surf.get_data(), np.uint8).reshape(
                [w, h, 4])[..., :3], [0.299, 0.587, 0.114]), -1) / 255


i = 0
for _ in range(reps):
    for theta in np.linspace(0, 2 * np.pi, theta_res):
        frame = _render_pendulum(theta)
        frames[i] = frame
        i += 1

print('dumping file')
np.savez_compressed('{}/pendulum-visual-uniform.npz'.format(dataset_path),
                    data=frames)

print('storing some pngs')
create_dir('{}/pendulum-uniform/'.format(dataset_path))
for n, f in enumerate(frames[40:60, ...]):
    scipy.misc.imsave(
        '{}/pendulum-uniform/frame{}.png'.format(dataset_path, n),
        np.squeeze(f))
print('done')
Example #4
    def _finalize_init(self):
        """ Call this at the end of childs __init__ to setup tensorboard and handle saved checkpoints """

        # init variables
        self.sess.run(tf.global_variables_initializer())

        # launch debug session
        if self.debug:
            self.sess = tf_debug.TensorBoardDebugWrapperSession(self.sess, "localhost:6064")

        # create tensorboard summaries
        if self.use_tensorboard:

            # either keep previous runs (rename the latest one) or clean them
            if not self.clean_tensorboard_runs:
                rename_latest_run(self.tensorboard_dir)
            else:
                clean_dir(self.tensorboard_dir)

            # if there is a directory suffix given, it will be included before the run number in the filename
            tb_dir_suffix = '' if self.tensorboard_suffix is None else '-{}'.format(self.tensorboard_suffix)
            self.tensorboard_dir = '{}/run{}-latest'.format(self.tensorboard_dir, tb_dir_suffix)

            # call child method to do preparations
            self._setup_tensorboard()

            # this operation can be run in a tensorflow session and will return all summaries
            # created above.
            self.merge_op = tf.summary.merge_all()

            self.writer = tf.summary.FileWriter(self.tensorboard_dir,
                                                graph=tf.get_default_graph())

        # flag indicating whether this instance is completely trained
        self.is_trained = False

        # if this instance is working with checkpoints, we'll check whether
        # one is already there. if so, we continue training from that checkpoint,
        # i.e. load the saved weights into target and online network.
        if self.use_checkpoints:

            # remove old weights if needed and not already trained until the end
            if self.clean_previous_weights:
                self.logger.info('Cleaning weights ...')

                if os.path.isfile('{}/done'.format(self.checkpoint_dir)):
                    self.logger.critical('Weights that were trained to completion (marked by \n\n'
                                         '{}/done)\n\n'
                                         'are about to be deleted. This is most likely a misconfiguration; '
                                         'either delete the done-file or the weights themselves manually.'.format(self.checkpoint_dir))

                clean_dir(self.checkpoint_dir)

            # be sure that the directory exists
            create_dir(self.checkpoint_dir)

            # the Saver object handles writing and reading protobuf weight files
            self.saver = tf.train.Saver(var_list=tf.all_variables())

            # file handle for writing episode summaries
            self.csvlog = open('{}/progress.csv'.format(self.checkpoint_dir), 'a')

            # write header line if file is empty
            if os.stat('{}/progress.csv'.format(self.checkpoint_dir)).st_size == 0:
                self.csvlog.write('episode, epsilon, reward\n')

            # load already saved weights
            self._load()
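The docstring above implies a contract for subclasses: build the graph first, then call the hook last. As a minimal, self-contained sketch of that usage under TF1 (class names here are illustrative, with a stub standing in for the real base class):

import tensorflow as tf

class StubBase:
    """ Stands in for the class above that defines _finalize_init(). """
    def _finalize_init(self):
        # mirror the first step of the real hook: initialise all variables
        self.sess.run(tf.global_variables_initializer())
        self.is_trained = False

class ToyAgent(StubBase):
    def __init__(self):
        self.sess = tf.Session()
        # ... build networks, losses and optimisers here ...
        self.value = tf.Variable(0.0)
        # call the hook last, so every variable exists before initialisation
        self._finalize_init()

agent = ToyAgent()
print(agent.sess.run(agent.value))  # prints 0.0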
Example #5
import logging

import numpy as np
import scipy.misc

from forkan import dataset_path
from forkan.common.utils import create_dir
from forkan.datasets import load_atari_normalized

logger = logging.getLogger(__name__)

logger.info('loading dataset ...')
data = load_atari_normalized('breakout-small')
logger.info('done loading')

np.random.seed(0)
idxs = [5, 6, 7, 305711, 244444]
rand_frames = data[idxs]

print('dumping file')
np.savez_compressed('{}/breakout-eval.npz'.format(dataset_path),
                    data=rand_frames)

print('storing some pngs')

create_dir('{}/breakout-eval/'.format(dataset_path))

for n, f in enumerate(rand_frames[:, ...]):
    scipy.misc.imsave('{}/breakout-eval/frame{}.png'.format(dataset_path, n),
                      np.squeeze(f))
print('done')
Example #6
def classify_ball(ds_path,
                  name_prefix,
                  mlp_neurons=16,
                  val_split=0.2,
                  batch_size=128,
                  epochs=100):

    K.set_session(tf.Session())

    dataset_prefix = 'ball_latents_'
    ds = np.load(f'{dataset_path}{ds_path}.npz')
    home = os.environ['HOME']

    orgs = ds['originals']
    poss = ds['ball_positions']

    dt = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M')
    model_name = f'{name_prefix}-N{mlp_neurons}-{ds_path}-{dt}'

    model_save_path = f'{model_path}classify-ball/{model_name}'
    create_dir(model_save_path)

    csv = CSVLogger(
        f'{model_save_path}/progress.csv', *[
            'timestamp', 'nbatch', 'mae_train', 'mse_train', 'mae_test',
            'mse_test'
        ])

    if name_prefix == 'VAE':
        lats = ds['vae_latents']
    elif name_prefix == 'RETRAIN':
        lats = ds['latents']
    else:
        print(f'name {name_prefix} unknown!')
        exit(0)

    model = Sequential([
        Dense(mlp_neurons, activation='relu', input_shape=(lats.shape[-1], )),
        Dense(mlp_neurons, activation='relu'),
        Dense(poss.shape[-1], activation='sigmoid')
    ])

    model.compile(optimizer=tf.train.AdamOptimizer(0.01),
                  loss='mse',
                  metrics=['mae'])

    sess = K.get_session()

    idxes = np.arange(lats.shape[0])
    np.random.shuffle(idxes)
    split_idx = int(lats.shape[0] * (1 - val_split))

    def draw_predicted_balls(imgs, locations, real_loc):
        imgs = imgs.copy()

        for n, img in enumerate(imgs):
            for j in [-1, 0, 1]:
                for i in [-1, 0, 1]:
                    x, y = np.clip(int((locations[n, 0] * 210) + j), 0,
                                   209), np.clip(
                                       int((locations[n, 1] * 160) + i), 0,
                                       159)
                    img[x, y] = [0, 200, 200]

                    x, y = np.clip(int((real_loc[n, 0] * 210) + j), 0,
                                   209), np.clip(
                                       int((real_loc[n, 1] * 160) + i), 0, 159)
                    img[x, y] = [200, 0, 200]

        return np.asarray(imgs, dtype=np.uint8)

    class TBCB(Callback):
        def __init__(self, m, ovo):
            self.mse_ph = tf.placeholder(tf.float32, (), name='mse-train')
            self.mae_ph = tf.placeholder(tf.float32, (), name='mae-train')

            self.val_mse_ph = tf.placeholder(tf.float32, (), name='mse-test')
            self.val_mae_ph = tf.placeholder(tf.float32, (), name='mae-test')

            self.im_ph = tf.placeholder(tf.uint8, (1, 210 * 3, 160 * 5, 3),
                                        name='pred-ball-pos-ph')

            tr_sum = []
            tr_sum.append(
                scalar_summary('mse-train', self.mse_ph, scope='train'))
            tr_sum.append(
                scalar_summary('mae-train', self.mae_ph, scope='train'))

            te_sum = []
            te_sum.append(
                scalar_summary('mse-test', self.val_mse_ph, scope='test'))
            te_sum.append(
                scalar_summary('mae-test', self.val_mae_ph, scope='test'))

            self.im_sum = tf.summary.image('pred-ball-pos', self.im_ph)
            self.mtr_sum = tf.summary.merge(tr_sum)
            self.mte_sum = tf.summary.merge(te_sum)

            self.fw = tf.summary.FileWriter(f'{home}/ball/{model_name}',
                                            graph=sess.graph)
            self.ovo = ovo
            self.step = 0
            self.m = m

        def on_batch_end(self, batch, logs={}):
            self.step += 1

            mse_t = logs['loss']
            mae_t = logs['mean_absolute_error']

            # this is usually only available on epoch end; maybe that resolution suffices?
            val_mse_t, val_mae_t = self.m.evaluate(x=self.validation_data[0],
                                                   y=self.validation_data[1])

            su, se = sess.run(
                [self.mtr_sum, self.mte_sum],
                feed_dict={
                    self.mse_ph: mse_t,
                    self.mae_ph: mae_t,
                    self.val_mse_ph: val_mse_t,
                    self.val_mae_ph: val_mae_t,
                })

            csv.writeline(
                datetime.datetime.now().isoformat(),
                self.step,
                mae_t,
                mse_t,
                val_mae_t,
                val_mse_t,
            )

            self.fw.add_summary(su, self.step)
            self.fw.add_summary(se, self.step)

        def on_epoch_end(self, epoch, logs=None):
            test_idxes = np.random.choice(self.validation_data[0].shape[0] - 1,
                                          15,
                                          replace=False)
            predicted_locations = model.predict(
                self.validation_data[0][test_idxes])
            imgs = draw_predicted_balls(self.ovo[test_idxes],
                                        predicted_locations,
                                        self.validation_data[1][test_idxes])

            r1 = np.concatenate(imgs[0:5], axis=1)
            r2 = np.concatenate(imgs[5:10], axis=1)
            r3 = np.concatenate(imgs[10:15], axis=1)

            img_mat = np.concatenate([r1, r2, r3], axis=0)

            img_sum = sess.run(self.im_sum, feed_dict={self.im_ph: [img_mat]})

            self.fw.add_summary(img_sum, self.step)

    model.fit(lats[idxes][:split_idx],
              poss[idxes][:split_idx],
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(lats[idxes][split_idx:],
                               poss[idxes][split_idx:]),
              callbacks=[TBCB(model, orgs[idxes][split_idx:])])

    model.save_weights(f'{model_save_path}/weights.h5')
    csv.flush()
    del csv

    return None
Example #7
frames = np.zeros((int(TOTAL_FRAMES), 84, 84))

model, env = main(args, just_return=True)
obs = env.reset()

log.info('generating frames')
for step in tqdm(range(int(TOTAL_FRAMES))):
    actions, _, _, _ = model.step(obs)

    img = np.asarray(np.squeeze(obs[..., -1]) / 255, dtype=np.float32)

    frames[step, ...] = img

    obs, _, done, _ = env.step(actions)
    done = done.any() if isinstance(done, np.ndarray) else done

    if done:
        obs = env.reset()

log.info('dumping file')
name = args[1].replace('NoFrameskip', '').lower().split('-')[0]
np.savez_compressed('{}/{}-normalized.npz'.format(dataset_path, name), data=frames)

log.info('storing some example pngs for {}'.format(name))
create_dir('{}/{}/'.format(dataset_path, name))
for n, f in enumerate(frames[40:60, ...]):
    scipy.misc.imsave('{}/{}/frame{}.png'.format(dataset_path, name, n), np.squeeze(f))

log.info('done')
Example #8
    def train(self, dataset, batch_size=32, num_episodes=30, print_freq=10):
        num_samples = len(dataset)

        # sanity check: the dataset must be normalised to [0, 1]
        assert np.max(dataset) <= 1, 'provide normalized dataset!'

        dataset = self._preprocess_batch(dataset)

        self.log.info('Training on {} samples for {} episodes.'.format(
            num_samples, num_episodes))
        tstart = time.time()
        nb = 1

        im_ph = tf.placeholder(tf.float32,
                               shape=np.multiply((1, ) + self.input_shape[1:],
                                                 [1, 3, 2, 1]))
        im_sum = tf.summary.image('img', im_ph)

        # rollout N episodes
        for ep in range(num_episodes):

            # shuffle dataset
            np.random.shuffle(dataset)

            for n, idx in enumerate(np.arange(0, num_samples, batch_size)):
                bps = max(int(nb / (time.time() - tstart)), 1)
                x = dataset[idx:min(idx + batch_size, num_samples), ...]

                _, loss, re_loss, kl_losses = self.s.run(
                    [self.train_op, self.vae_loss, self.re_loss, self.kl_loss],
                    feed_dict={self._input: x})

                # mean losses
                re_loss = np.mean(re_loss)
                kl_loss = self.beta * np.sum(kl_losses)

                if self.tb:
                    fd = {
                        self._input: x,
                        self.rel_ph: re_loss,
                        self.kll_ph: kl_loss,
                        self.bps_ph: bps,
                        self.ep_ph: ep,
                    }

                    for i, kph in enumerate(self.klls_ph):
                        fd.update({kph: kl_losses[i]})

                    suma = self.s.run(self.merge_op, feed_dict=fd)

                    self.writer.add_summary(suma, nb)

                # increase batch counter
                nb += 1

                self.csv.writeline(datetime.datetime.now().isoformat(), ep, nb,
                                   re_loss, kl_loss, *kl_losses)

                if n % print_freq == 0 and print_freq != -1:

                    if self.tb:
                        du = x[np.random.choice(x.shape[0], 3)]
                        reca = self.reconstruct(du)
                        hori = []
                        for o in range(3):
                            hori.append(
                                np.concatenate((du[o], reca[o]), axis=1))
                        fin = np.concatenate(hori, axis=0)
                        isu = self.s.run(
                            im_sum,
                            feed_dict={im_ph: np.expand_dims(fin, axis=0)})
                        self.writer.add_summary(isu, nb)
                        self.writer.flush()

                    total_batches = (num_samples // batch_size) * num_episodes

                    perc = ((nb) / max(total_batches, 1)) * 100
                    steps2go = total_batches - nb
                    secs2go = steps2go / bps
                    min2go = secs2go / 60

                    hrs = int(min2go // 60)
                    mins = int(min2go) % 60

                    tab = tabulate([
                        ['name', f'{self.name}-b{self.beta}'],
                        ['episode', ep],
                        ['batch', n],
                        ['bps', bps],
                        ['rec-loss', re_loss],
                        ['kl-loss', kl_loss],
                        ['ETA', '{}h {}min'.format(hrs, mins)],
                        ['done', '{}%'.format(int(perc))],
                    ])

                    print('\n{}'.format(tab))

            self._save()

        newest = '{}/{}/'.format(self.parent_dir, self.name)
        self.log.info('done training!\ncopying files to {}'.format(newest))

        # create, clean & copy
        create_dir(newest)
        clean_dir(newest, with_files=True)
        copytree(self.savepath, newest)

        # as a reference, we leave a file containing the folder name of the model that was just copied
        with open('{}from'.format(newest), 'a') as fi:
            fi.write('{}\n'.format(self.savepath.split('/')[-2]))
Example #9
    def __init__(self,
                 input_shape=None,
                 name='default',
                 network='atari',
                 latent_dim=20,
                 beta=1.0,
                 lr=1e-4,
                 zeta=1.0,
                 load_from=None,
                 session=None,
                 optimizer=tf.train.AdamOptimizer,
                 with_opt=True,
                 tensorboard=False):

        if input_shape is None:
            assert load_from is not None, 'input shape needs to be given if no model is loaded'

        self.log = logging.getLogger('vae')

        if load_from is None:  # fresh vae
            # take care of correct input dim: (BATCH, HEIGHT, WIDTH, CHANNELS)
            # add channel dim if not provided
            if len(input_shape) == 2:
                input_shape = input_shape + (1, )

            self.latent_dim = latent_dim
            self.network = network
            self.beta = beta
            self.name = name
            self.zeta = zeta
            self.lr = lr

            # add batch dim
            self.input_shape = (None, ) + input_shape

            self.savename = '{}-b{}-z{}-lat{}-lr{}-{}'.format(
                name, beta, zeta, latent_dim, lr,
                datetime.datetime.now().strftime('%Y-%m-%dT%H:%M'))
            self.parent_dir = '{}vae-{}'.format(model_path, network)
            self.savepath = '{}vae-{}/{}/'.format(model_path, network,
                                                  self.savename)
            create_dir(self.savepath)

            self.log.info('storing files under {}'.format(self.savepath))

            params = locals()
            params.pop('self')
            params.pop('optimizer')
            params.pop('session')

            with open('{}/params.json'.format(self.savepath), 'w') as outfile:
                json.dump(params, outfile)
        else:  # load old parameter

            self.savename = load_from
            self.parent_dir = '{}vae-{}'.format(model_path, network)
            self.savepath = '{}vae-{}/{}/'.format(model_path, network,
                                                  self.savename)

            self.log.info('loading model and parameters from {}'.format(
                self.savepath))

            try:
                with open('{}/params.json'.format(self.savepath),
                          'r') as infile:
                    params = json.load(infile)

                for k, v in params.items():
                    setattr(self, k, v)

                # add batch dim
                self.input_shape = (None, ) + tuple(self.input_shape)
            except Exception as e:
                self.log.critical('loading {}/params.json failed!\n{}'.format(
                    self.savepath, e))
                exit(0)

        # store number of channels
        self.num_channels = self.input_shape[-1]

        self.tb = tensorboard

        with tf.variable_scope('input-ph'):
            self._input = tf.placeholder(tf.float32,
                                         shape=self.input_shape,
                                         name='input')
        """ TF Graph setup """
        self.mus, self.logvars, self.z, self._output = \
            build_network(self._input, self.input_shape, latent_dim=self.latent_dim, network_type=self.network)
        print('\n')
        """ Loss """
        # Loss
        # Reconstruction loss
        self.re_loss = K.binary_crossentropy(K.flatten(self._input),
                                             K.flatten(self._output))
        self.re_loss *= self.input_shape[1]**2  # dont square, use correct dims
        self.re_loss *= self.zeta

        # define kullback leibler divergence
        self.kl_loss = 1 + self.logvars - K.square(self.mus) - K.exp(
            self.logvars)
        self.kl_loss = -0.5 * K.mean(self.kl_loss, axis=0)
        self.vae_loss = K.mean(self.re_loss + self.beta * K.sum(self.kl_loss))

        # create optimizer
        if with_opt:
            self.train_op = optimizer(learning_rate=self.lr).minimize(
                self.vae_loss)
        """ TF setup """
        self.s = session if session is not None else tf.Session()
        tf.global_variables_initializer().run(session=self.s)

        # the Saver object handles writing and reading protobuf weight files
        self.saver = tf.train.Saver(var_list=tf.all_variables())

        if load_from is not None:
            self.log.info('restoring graph ... ')
            self.saver.restore(self.s, '{}'.format(self.savepath))
            self.log.info('done!')

        self.log.info('VAE has parameters:')
        print_dict(params, lo=self.log)

        if self.tb:
            self._tensorboard_setup()

        csv_header = ['date', '#episode', '#batch', 'rec-loss', 'kl-loss'] +\
                     ['z{}-kl'.format(i) for i in range(self.latent_dim)]
        self.csv = CSVLogger('{}/progress.csv'.format(self.savepath),
                             *csv_header)
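For reference, the loss assembled above is the usual beta-VAE objective (up to the batch mean the code takes at the end, and with the reconstruction term additionally scaled by the image area): a zeta-weighted binary cross-entropy reconstruction term plus the closed-form KL divergence of each diagonal Gaussian posterior from the unit Gaussian prior, weighted by beta. With mu_i and log sigma_i^2 the encoder outputs (self.mus, self.logvars) and d the latent dimension:

    \mathcal{L}(x) = \zeta\,\mathrm{BCE}\bigl(x, \hat{x}\bigr)
                   + \beta \sum_{i=1}^{d} -\tfrac{1}{2}\Bigl(1 + \log\sigma_i^{2} - \mu_i^{2} - \sigma_i^{2}\Bigr)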
Example #10
    def __init__(self,
                 rlpath,
                 input_shape,
                 network='pendulum',
                 latent_dim=20,
                 beta=1.0,
                 k=5,
                 init_from=None,
                 with_attrs=False,
                 sess=None,
                 scaled_re_loss=True):

        self.log = logging.getLogger('vae')

        self.input_shape = (None, ) + input_shape
        self.scaled_re_loss = scaled_re_loss
        self.latent_dim = latent_dim
        self.with_attrs = with_attrs
        self.init_from = init_from
        self.network = network
        self.beta = beta
        self.k = k

        self.savepath = f'{rlpath}/vae/'.replace('//', '/')
        create_dir(self.savepath)

        self.log.info('storing files under {}'.format(self.savepath))

        params = locals()
        params.pop('self')
        params.pop('sess')

        if not self.with_attrs:

            with open(f'{self.savepath}/params.json', 'w') as outfile:
                json.dump(params, outfile)
        else:
            self.log.info('load_base_weights() needs to be called!')

        with tf.variable_scope('vae', reuse=tf.AUTO_REUSE):
            self.X = tf.placeholder(tf.float32,
                                    shape=(
                                        None,
                                        k,
                                    ) + self.input_shape[1:],
                                    name='stacked-vae-input')
        """ TF setup """
        self.s = sess
        assert self.s is not None, 'you need to pass a tf.Session()'
        """ TF Graph setup """

        self.mus = []
        self.logvars = []
        self.z = []
        self.Xhat = []

        for i in range(self.k):
            m, lv, z, xh = \
                build_network(self.X[:, i, ...], self.input_shape, latent_dim=self.latent_dim, network_type=self.network)
            self.mus.append(m)
            self.logvars.append(lv)
            self.z.append(z)
            self.Xhat.append(xh)
        print('\n')

        self.U = tf.concat(self.mus, axis=1)

        # the Saver object handles writing and reading protobuf weight files
        self.saver = tf.train.Saver(var_list=tf.trainable_variables(
            scope='vae'))

        if init_from:
            self._load_base_weights()
        """ Losses """
        # Loss
        # Reconstruction loss
        rels = []
        for i in range(self.k):
            from tensorflow.contrib.layers import flatten
            inp, outp = flatten(self.X[:, i, ...]), flatten(self.Xhat[i])
            xent = K.binary_crossentropy(inp, outp)
            if self.scaled_re_loss:
                xent *= (self.input_shape[1]**2)
            rels.append(xent)
        self.re_loss = tf.reduce_mean(tf.stack(rels), axis=0)

        # define kullback leibler divergence
        kls = []
        for i in range(self.k):
            kls.append(-0.5 * K.mean(
                (1 + self.logvars[i] - K.square(self.mus[i]) -
                 K.exp(self.logvars[i])),
                axis=0))
        self.kl_loss = tf.reduce_mean(tf.stack(kls), axis=0)

        self.vae_loss = K.mean(self.re_loss + self.beta * K.sum(self.kl_loss))

        self.log.info('VAE has parameters:')
        print_dict(params, lo=self.log)