Example No. 1
    # parse the number of filters from the saved model-options CSV
    # ('inputfile' is assumed to be a file handle opened in an enclosing block)
    for row in csv.reader(inputfile):
        print row[0]
        if 'filters' in row[0]:
            # the value follows the 'filters: ' prefix (9 characters)
            filters = int(row[0][9:])
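# A self-contained sketch of the pattern above: reading a saved model-options
# CSV and extracting the 'filters' hyperparameter. The file name and helper
# below are hypothetical; only the 'filters: N' parsing mirrors the loop above.
import csv

def read_filters(options_fname):
    with open(options_fname) as options_file:
        for row in csv.reader(options_file):
            if 'filters' in row[0]:
                # assumes rows of the form 'filters: 32'
                return int(row[0][9:])
    return None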

# build model
model = nn_model.build_model(filters)

# load trained parameters
model_parameters = save_path + 'model_parameters.hdf'
model.load_weights(model_parameters)

# calculate new reconstructions with the NN
g_nn = model.predict(f)
# resize to original resolution
g_nn = bib_utils.resize_NN_image(g_nn, training=False)

print 'g_nn:', g_nn.shape, g_nn.dtype

# -------------------------------------------------------------------------
# Plot grid of reconstructions

font = {'weight': 'normal', 'size': 3}

plt.rc('font', **font)

nx = 5
ny = 9

from matplotlib import rcParams
rcParams['axes.titlepad'] = 2
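# A minimal sketch of the grid plot that the settings above prepare for
# (nx x ny panels of NN reconstructions). The colour map, panel titles and
# output file name are assumptions, not taken from the original example.
fig, axes = plt.subplots(nx, ny, figsize=(ny, nx))
for k, ax in enumerate(axes.flat):
    if k >= len(g_nn):
        break
    ax.imshow(g_nn[k].squeeze(), cmap='inferno')  # hypothetical colour map
    ax.set_title('reconstruction %d' % k)         # hypothetical panel title
    ax.set_xticks([])
    ax.set_yticks([])
plt.savefig('nn_reconstructions.png', dpi=300)    # hypothetical file name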
Example No. 2
save_path = './Results/'
if not os.path.exists(save_path):
    print 'Creating directory ', save_path
    os.makedirs(save_path)

# ----------------------------------------------------------------------
# Load Data

fname = '../data/tomo_JET.hdf'
f, g, _, _ = bib_data.get_tomo_JET(fname,
                                   faulty=True,
                                   flatten=False,
                                   clip_tomo=True)

# need to reshape image to match NN dimensions
g = bib_utils.resize_NN_image(g, training=True)

print 'g:', g.shape, g.dtype
print 'f:', f.shape, f.dtype

# ------------------------------------------------------------------------
# Divide into training, validation and test set

i_train, i_valid, i_test = bib_utils.divide_data(g.shape[0],
                                                 ratio=[.8, .1, .1],
                                                 test_set=True,
                                                 random=False)
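
# A minimal sketch of an 80/10/10 index split like the call above. This is an
# assumption about the behaviour of bib_utils.divide_data (the test_set flag
# is ignored here), not its actual implementation.
import numpy as np

def divide_data_sketch(n, ratio=(.8, .1, .1), random=False):
    indices = np.random.permutation(n) if random else np.arange(n)
    n_train = int(ratio[0] * n)
    n_valid = int(ratio[1] * n)
    i_train = indices[:n_train]
    i_valid = indices[n_train:n_train + n_valid]
    i_test = indices[n_train + n_valid:]
    return i_train, i_valid, i_test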

f_valid = f[i_valid]
g_valid = g[i_valid]
f_train = f[i_train]