# --- Example #1 ---
# Hyperparameters for fine-tuning the Fiel network as an author classifier.
num_authors=len(labels)          # one softmax class per author key in the HDF5 file
num_forms_per_author=-1          # NOTE(review): presumably -1 means "use all forms" -- confirm in the minibatcher
shingle_dim=(56,56)              # height/width of each image shingle fed to the net
batch_size=32
load_size=batch_size*1000        # number of shingles pulled into memory per load
iterations = 1000
lr = 0.001                       # SGD learning rate


# ### Define your model
# 
# Here, we're using the Fiel Network

# In[6]:

# Load the pretrained feature extractor up through layer 'fc7' (weights from
# paramsfile, compilation deferred), then stack a classification head on top:
# one logit per author, softmax-normalized.
vnet = load_verbatimnet( 'fc7', paramsfile=paramsfile, compiling=False )
vnet.add(Dense(num_authors))
vnet.add(Activation('softmax'))
# SGD with Nesterov momentum and mild weight decay for fine-tuning.
sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
vnet.compile(loss='categorical_crossentropy', optimizer=sgd)
print "Finished compilation"


# ### Minibatcher (to load in your data for each batch)
# The generic Hdf5MiniBatcher branch is hard-disabled; the IAM-specific
# batcher below is the one actually used.
if False:
    mini_m = Hdf5MiniBatcher(hdf5authors, num_authors, num_forms_per_author,
                            shingle_dim=shingle_dim, default_mode=MiniBatcher.TRAIN,
                            batch_size=batch_size, add_rotation=True)
else:
    mini_m = IAM_MiniBatcher(hdf5authors, num_authors, num_forms_per_author,
                            shingle_dim=shingle_dim, default_mode=MiniBatcher.TRAIN,
# --- Example #2 ---
# featurefile = 'icdar13data/experimental-processed/icdar13ex_fiel657.npy'
# featurefile = 'nmecdata/nmec_bw_fiel657_features_steps5_thresh.0025.npy'
featurefile = 'nmecdata/nmec_bw_fiel657_features_steps5_thresh.15.npy'

# This is the neural networks and parameters you are deciding to use
paramsfile = '/fileserver/iam/iam-processed/models/fiel_657.hdf5'

# ### Full image HDF5 file
#
# Each entry in the HDF5 file is a full image/form/document.
# Open read-only and close the handle promptly instead of leaking it;
# snapshot the keys into a list so they stay valid after the file closes.
with h5py.File(hdf5images, 'r') as h5file:
    labels = list(h5file.keys())

# ### Load feature extractor neural network
# Only needed when features are extracted from scratch (not loaded from disk).
if not load_features:
    vnet = load_verbatimnet('fc7', paramsfile=paramsfile)
    # Loss/optimizer are placeholders: the net is only run forward for features.
    vnet.compile(loss='mse', optimizer='sgd')

# ### Image features
#
# Currently taken as averages of all shard features in the image. You can either load them or extract everything manually, depending on if you have the .npy array.
if load_features:
    print("Loading features in from " + featurefile)
    imfeats = np.load(featurefile)
    print("Loaded features")
else:
    print("Begin extracting features from " + hdf5images)
    # NOTE(review): varthresh=0.05 disagrees with the '.15' threshold encoded
    # in the featurefile name above -- confirm which threshold is intended.
    imfeats = extract_imfeats(hdf5images, vnet, steps=(5, 5), varthresh=0.05)
    print(labels)
    # Cache the features so the next run can take the load_features fast path.
    np.save(featurefile, imfeats)
# --- Example #3 ---
# featurefile = '/fileserver/nmec-handwriting/nmec_cropped_fiel657_features_steps5_thresh.005.npy'
featurefile = '/fileserver/nmec-handwriting/globalfeatures/nmec_bw_crop.deNNam_fiel657.steps5_mean250.hdf5'
outdir = '/fileserver/nmec-handwriting/localfeatures/nmec_bw_crop.deNNiam_fiel657.steps5_mean250/'

# This is the neural networks and parameters you are deciding to use
paramsfile = '/fileserver/iam/iam-processed/models/fiel_657.hdf5'


### Full image HDF5 file
# Each entry in the HDF5 file is a full image/form/document.
# Open read-only and close the handle instead of leaking it; snapshot the
# keys into a list so they remain usable after the file is closed.
with h5py.File(hdf5images, 'r') as h5file:
    labels = list(h5file.keys())


# ### Load feature extractor neural network
# Only build the extractor when features must be computed from scratch.
if not load_features:
    vnet = load_verbatimnet('fc7', paramsfile=paramsfile)
    # Loss/optimizer are placeholders: the net is only run forward for features.
    vnet.compile(loss='mse', optimizer='sgd')


### Image features
# Currently taken as averages of all shard features in the image. You can either load them or extract everything manually, depending on if you have the .npy array.
if load_features:
    print("Loading features in from " + featurefile)
    imfeats = np.load(featurefile)
    print("Loaded features")
else:
    print("Begin extracting features from " + hdf5images)
    noiseparamfile = '/work/code/repo/models/conv2_linet_iam-bin.hdf5'
    # Denoise each shingle before feature extraction; per-image results are
    # written under outdir by extract_imfeats.
    # NOTE(review): unlike the sibling scripts, the aggregated imfeats are
    # never saved to featurefile here -- confirm extract_imfeats persists
    # everything needed via outdir.
    imfeats = extract_imfeats(hdf5images, vnet,
                              denoiser=load_denoisenet(noiseparams=noiseparamfile),
                              outdir=outdir, steps=(5, 5), compthresh=250)
    print(labels)
# --- Example #4 ---
# featurefile = '/fileserver/nmec-handwriting/nmec_bw.deNN_fiel657.step5_noE.npy'
# featurefile = '/fileserver/nmec-handwriting/nmec_bw_crop.deNN_fiel657.step5_250.npy'
featurefile = 'nmec_bw_crop.fiel657_120.step20_250.npy'  # cached .npy of per-image features

# This is the neural networks and parameters you are deciding to use
# paramsfile = '/fileserver/iam/iam-processed/models/fiel_657.hdf5'
paramsfile = 'fielnet120-nmec.hdf5'

# ### Full image HDF5 file
#
# Each entry in the HDF5 file is a full image/form/document
# NOTE(review): the h5py handle opened here is never closed (leaked).
labels = h5py.File(hdf5images).keys()

# ### Load feature extractor neural network
# Extractor truncated at layer 'fc7'; input is a single-channel shingle of
# shape shingle_dims (defined elsewhere in the file).
vnet = load_verbatimnet('fc7',
                        input_shape=(1, ) + shingle_dims,
                        paramsfile=paramsfile)
# Loss/optimizer are placeholders -- the net is only run forward for features.
vnet.compile(loss='mse', optimizer='sgd')
print "Finished loading neural network in and compilation"

# ### Image features
#
# Currently taken as averages of all shard features in the image. You can either load them or extract everything manually, depending on if you have the .npy array.
if load_features:
    print "Loading features in from " + featurefile
    imfeats = np.load(featurefile)
    print "Loaded features"
else:
    print "Begin extracting features from " + hdf5images
    imfeats = extract_imfeats(hdf5images,
                              vnet,
# --- Example #5 ---
# featurefile = '/fileserver/nmec-handwriting/nmec_bw_crop.deNN_fiel657.step5_250.npy'
featurefile = 'nmec_bw_crop.fiel657_120.step20_250.npy'

# This is the neural networks and parameters you are deciding to use
# paramsfile = '/fileserver/iam/iam-processed/models/fiel_657.hdf5'
paramsfile = 'fielnet120-nmec.hdf5'


# ### Full image HDF5 file
#
# Each entry in the HDF5 file is a full image/form/document.
# Open read-only and close the handle instead of leaking it; snapshot the
# keys into a list so they remain usable after the file is closed.
with h5py.File(hdf5images, 'r') as h5file:
    labels = list(h5file.keys())


# ### Load feature extractor neural network
# Extractor truncated at layer 'fc7'; expects single-channel shingles of
# shape shingle_dims (defined elsewhere in the file).
vnet = load_verbatimnet('fc7', input_shape=(1,) + shingle_dims, paramsfile=paramsfile)
# Loss/optimizer are placeholders: the net is only run forward for features.
vnet.compile(loss='mse', optimizer='sgd')
print("Finished loading neural network in and compilation")


# ### Image features
#
# Currently taken as averages of all shard features in the image. You can either load them or extract everything manually, depending on if you have the .npy array.
if load_features:
    print("Loading features in from " + featurefile)
    imfeats = np.load(featurefile)
    print("Loaded features")
else:
    print("Begin extracting features from " + hdf5images)
    imfeats = extract_imfeats(hdf5images, vnet, shingle_dims=shingle_dims,
                              steps=(20, 20), compthresh=250)
    print(labels)
    # Cache the freshly extracted features so the next run can take the
    # load_features fast path (matches the sibling extraction scripts).
    np.save(featurefile, imfeats)