# Code example #1
        self.mean_squared_error = []

    def on_epoch_end(self, batch, logs=None):
        """Record the epoch's loss and mean squared error from Keras logs.

        NOTE(review): Keras passes the epoch index as the first positional
        argument, so ``batch`` actually receives the epoch number here; the
        parameter name is kept unchanged for interface compatibility.
        Appends ``None`` when a metric key is absent from ``logs``.
        """
        # Fix: the original used a mutable default (`logs={}`), which is
        # shared across calls; use a None sentinel instead.
        logs = {} if logs is None else logs
        self.losses.append(logs.get('loss'))
        self.mean_squared_error.append(logs.get('mean_squared_error'))


# Record per-epoch metrics via the custom callback defined above.
history = LossHistory()

# Callbacks invoked during training; `checkpoint` is defined earlier in the
# file. (Removed a stale commented-out alternative that omitted `history`.)
callbacks_list = [checkpoint, history]

###########################
### Start Training the network
###########################
# Collect base filenames for each map and build their full paths.
subFields = lnn.loadBaseFNames(mapLoc)
base = [mapLoc + s for s in subFields]

# Input pipeline: shuffle the full file list, load each map via the project
# loader wrapped in py_func (runs Python inside the graph), repeat forever,
# then batch.
# NOTE(review): tf.py_func is the deprecated TF1 API; TF2 code would use
# tf.py_function / tf.numpy_function — confirm the TF version in use.
dataset = tf.data.Dataset.from_tensor_slices(tf.convert_to_tensor(base))
dataset = dataset.shuffle(buffer_size=len(base))
dataset = dataset.map(lambda item: tuple(
    tf.py_func(lnn.utf8FileToMapAndLum, [item, 'basic', True],
               [tf.float64, tf.float64])))
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)

# Train. `steps_per_epoch` is required because the dataset repeats forever.
# NOTE(review): this rebinds `history` (previously the LossHistory callback)
# to the Keras History object returned by fit(); the callback itself is still
# reachable through `callbacks_list` — confirm this shadowing is intentional.
history = model2.fit(dataset,
                     epochs=epochs,
                     steps_per_epoch=steps_per_epoch,
                     callbacks=callbacks_list,
                     verbose=1)
# Code example #2
# where to store things
data_loc = '../data/'
predict_loc = data_loc + 'predictions/'

# load in the config file; each section describes one model
config = configparser.ConfigParser()
config.read(data_loc + config_name + '.ini')
models = config.sections()

# defaults: shared settings taken from the first model's section
defaults = lnn.get_config_info(config, models[0])

# start the work to load the validation maps
cur_map_loc = config[models[0]]['map_loc']
subFields = lnn.loadBaseFNames(cur_map_loc)

# set a fixed random seed so data is shuffled the same way every time
# (reproducible train/validation split), then re-randomize the seed so
# later random draws are not deterministic
np.random.seed(1234)
np.random.shuffle(subFields)
np.random.seed()

# get map locations: the last `valPer` fraction (20%) of the shuffled
# list becomes the validation set
valPer = 0.2
valPoint = int(len(subFields) * (1 - valPer))
base_val = [cur_map_loc + s for s in subFields[valPoint:]]

# debug
# debug_map_numbs = 2
# base_val = base_val[:debug_map_numbs]