Ejemplo n.º 1
0
    def __init__(self, config, subtask, dataset, tfconfig):
        """Build a private graph/session for one subtask and restore its weights.

        Copies the config (so the caller's object is untouched), optionally
        loads saved input normalizers, constructs the network and savers, and
        finally restores trained weights — raising if that fails.
        """
        logdir = os.path.join(config.logdir, subtask)
        # Deep-copy so mutating `subtask` below does not leak to the caller.
        self.config = copy.deepcopy(config)
        self.config.subtask = subtask
        # Each instance gets its own graph and session.
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph, config=tfconfig)
        with self.graph.as_default():
            mean_file = os.path.join(logdir, "mean.h5")
            if os.path.exists(mean_file):
                # Normalizers were written at training time — reuse them here.
                norm = {
                    'mean': loadh5(mean_file),
                    'std': loadh5(os.path.join(logdir, "std.h5")),
                }
                print("[{}] Loaded input normalizers for testing".format(
                    subtask))
                self.network = Network(self.sess, self.config, dataset, norm)
            else:
                self.network = Network(self.sess, self.config, dataset)

            # Bookkeeping containers keyed by module name.
            self.saver = {}
            self.best_val_loss = {}
            self.best_step = {}
            # One saver for the joint parameters and one for this subtask.
            for key in ("joint", subtask):
                self.saver[key] = tf.train.Saver(self.network.allparams[key])

            # Graph is complete: initialize variables, then load saved weights.
            self.sess.run(tf.global_variables_initializer())
            if not self.restore_network():
                raise RuntimeError("Could not load network weights!")
Ejemplo n.º 2
0
    def __init__(self, config, rng):
        """Set up session, dataset, network, and savers for testing.

        Uses a plain session (no supervisor) with GPU memory allowed to grow,
        reloads training-time input normalizers when present, and creates a
        saver for the joint parameters and for the configured subtask.
        """
        self.config = config
        self.rng = rng

        # Keep things simple: manual session, GPU memory grows on demand.
        tfconfig = tf.ConfigProto()
        tfconfig.gpu_options.allow_growth = True
        self.sess = tf.Session(config=tfconfig)

        # Dataset first — the network is built on top of it.
        self.dataset = Dataset(self.config, rng)

        # Recover the input normalizers saved during training (hacky but works).
        logdir = os.path.join(self.config.logdir, self.config.subtask)
        mean_file = os.path.join(logdir, "mean.h5")
        if os.path.exists(mean_file):
            norm = {
                'mean': loadh5(mean_file),
                'std': loadh5(os.path.join(logdir, "std.h5")),
            }
            print("[{}] Loaded input normalizers for testing".format(
                self.config.subtask))
            self.network = Network(self.sess, self.config, self.dataset, norm)
        else:
            self.network = Network(self.sess, self.config, self.dataset)

        # Per-module bookkeeping containers.
        self.saver = {}
        self.best_val_loss = {}
        self.best_step = {}
        # One saver for the joint parameters and one for the current subtask.
        for key in ("joint", self.config.subtask):
            self.saver[key] = tf.train.Saver(self.network.allparams[key])

        # Everything is wired up — initialize the variables.
        self.sess.run(tf.global_variables_initializer())
Ejemplo n.º 3
0
    def __init__(self, config, rng):
        """Set up session, dataset, network, savers, and summaries for training.

        GPU memory is either capped at a fixed fraction (``config.usage > 0``)
        or allowed to grow. A saver is created only for modules that actually
        own parameters, and a summary writer is attached for the active
        subtask. Finally configures the rotation-augmentation mode flag.
        """
        self.config = config
        self.rng = rng

        # Manual session management (no supervisor). Pick the GPU memory
        # policy: a fixed fraction when requested, otherwise grow on demand.
        tfconfig = tf.ConfigProto()
        if self.config.usage > 0:
            tfconfig.gpu_options.allow_growth = False
            tfconfig.gpu_options.per_process_gpu_memory_fraction = \
                self.config.usage
        else:
            tfconfig.gpu_options.allow_growth = True
        self.sess = tf.Session(config=tfconfig)

        # Dataset first, then the network built on top of it.
        self.dataset = Dataset(self.config, rng)
        self.network = Network(self.sess, self.config, self.dataset)

        # Per-module bookkeeping containers.
        self.saver = {}
        self.summary_writer = {}
        self.best_val_loss = {}
        self.best_step = {}
        # Savers only for modules that actually have parameters.
        for key in self.network.allparams:
            params = self.network.allparams[key]
            if len(params) > 0:
                with tf.variable_scope("saver-{}".format(key)):
                    self.saver[key] = tf.train.Saver(params)
        # Summary writer for the active subtask, attached to this graph.
        subtask = self.config.subtask
        self.summary_writer[subtask] = tf.summary.FileWriter(
            os.path.join(self.config.logdir, subtask),
            graph=self.sess.graph)
        # Best-so-far validation loss and the step it occurred at.
        self.best_val_loss[subtask] = np.inf
        self.best_step[subtask] = 0

        # Everything is wired up — initialize the variables.
        self.sess.run(tf.global_variables_initializer())

        # Rotation-augmentation mode: 1 = augment rotations,
        # -1 = force the augmented set, 0 = neither.
        self.use_aug_rot = 0
        if self.config.augment_rotations:
            self.use_aug_rot = 1
        elif self.config.use_augmented_set:
            self.use_aug_rot = -1