Example #1
def main():
    # Load dataset
    train_x, train_y = mnist.load_dataset(download=True, train=True)
    test_x, test_y = mnist.load_dataset(download=True, train=False)

    # Batch Gradient Descent
    print("Training using Batch Gradient Descent")
    w1_bgd, b1_bgd, w2_bgd, b2_bgd, loss_history_bgd = train(train_x,
                                                             train_y,
                                                             learning_rate=0.2,
                                                             num_epochs=20,
                                                             batch_size=None)

    # Minibatch Gradient Descent with batch_size 64
    print("Training using mini-Batch Gradient Descent with batch size of 64")
    w1_mbgd, b1_mbgd, w2_mbgd, b2_mbgd, loss_history_mbgd = train(
        train_x, train_y, learning_rate=0.1, num_epochs=20, batch_size=64)

    # Display loss curve
    plt.plot(loss_history_bgd, label="Batch Gradient Descent")
    plt.plot(loss_history_mbgd, label="mini-Batch Gradient Descent")
    plt.legend()
    plt.show()

    # Test
    print("Batch Gradient Descent")
    test(test_x, test_y, w1_bgd, b1_bgd, w2_bgd, b2_bgd)
    print("mini-Batch Gradient Descent")
    test(test_x, test_y, w1_mbgd, b1_mbgd, w2_mbgd, b2_mbgd)
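The train and test helpers called above are not shown in this snippet. A minimal sketch of how such a train helper could switch between full-batch and mini-batch gradient descent on the batch_size argument (the softmax-regression stand-in, parameter names, and shapes below are assumptions, not the original two-layer network; train_y is assumed to be a flat vector of integer class labels and train_x a 2-D array):

import numpy as np

def train(train_x, train_y, learning_rate=0.1, num_epochs=20, batch_size=None):
    # batch_size=None -> one full-batch update per epoch; otherwise mini-batches.
    n, d = train_x.shape
    n_classes = int(train_y.max()) + 1
    w, b = np.zeros((d, n_classes)), np.zeros(n_classes)
    one_hot = np.eye(n_classes)[train_y]
    loss_history = []
    rng = np.random.default_rng(0)
    for _ in range(num_epochs):
        order = rng.permutation(n)
        step = n if batch_size is None else batch_size
        for start in range(0, n, step):
            sel = order[start:start + step]
            xb, yb = train_x[sel], one_hot[sel]
            logits = xb @ w + b
            logits -= logits.max(axis=1, keepdims=True)   # numerical stability
            probs = np.exp(logits)
            probs /= probs.sum(axis=1, keepdims=True)
            w -= learning_rate * xb.T @ (probs - yb) / len(sel)
            b -= learning_rate * (probs - yb).mean(axis=0)
            loss_history.append(
                -np.mean(np.log(probs[np.arange(len(sel)), train_y[sel]] + 1e-12)))
    return w, b, loss_history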
Example #2
File: set.py Project: wacabanga/pdt
def main(argv):

    options = handle_options('set', argv)

    mnist_data = load_dataset()
    X_train = mnist_data[0].reshape(-1, 28, 28, 1)
    sfx = gen_sfx_key(('adt', 'nblocks', 'block_size'), options)

    empty_set_args = {'initializer': tf.random_uniform_initializer}
    set_adt, set_pdt = gen_set_adt(X_train,
                                   options,
                                   store_args=options,
                                   is_in_args=options,
                                   size_args=options,
                                   is_empty_args=options,
                                   empty_set_args=empty_set_args,
                                   nitems=options['nitems'],
                                   batch_size=options['batch_size'])

    graph = tf.get_default_graph()
    savedir = mk_dir(sfx)

    path_name = os.path.join(
        os.environ['DATADIR'],
        'graphs',
        sfx,
    )
    tf.train.SummaryWriter(path_name, graph)

    load_train_save(options, set_adt, set_pdt, sfx, savedir)
    push, pop = set_pdt.call_fns
Example #3
def NerNet(batchsize = 50, epochs = 10):

	def Ner(X_inputs, y_targets, batch_size = 50, num_epochs = 10):
		input_X = T.tensor4('Input')
		target_y = T.vector('Target', dtype='int32')

		input_layer  = lasagne.layers.InputLayer(shape=(None,1,28,28), input_var=input_X, name = "Input")
		dense_layer  = lasagne.layers.DenseLayer(input_layer,num_units=100, nonlinearity=sigmoid, name = "Dense")
		output_layer = lasagne.layers.DenseLayer(dense_layer,num_units = 10, nonlinearity=softmax, name = "Output")

		y_predicted = lasagne.layers.get_output(output_layer)
		all_weights = lasagne.layers.get_all_params(output_layer)

		loss = lasagne.objectives.categorical_crossentropy(y_predicted,target_y).mean()
		accuracy = lasagne.objectives.categorical_accuracy(y_predicted,target_y).mean()
		updates_sgd = lasagne.updates.rmsprop(loss, all_weights,learning_rate=0.01)

		train_fun = theano.function([input_X,target_y],[loss,accuracy],updates= updates_sgd)
		accuracy_fun = theano.function([input_X,target_y],accuracy)
		pred_fun = theano.function([input_X], y_predicted)

		for epoch in range(num_epochs):
		    # In each epoch, we do a full pass over the training data:
		    train_err = 0
		    train_acc = 0
		    train_batches = 0
		    start_time = time.time()
		    for batch in iterate_minibatches(X_inputs, y_targets, batch_size):
		        inputs, targets = batch
		        train_err_batch, train_acc_batch= train_fun(inputs, targets)
		        train_err += train_err_batch
		        train_acc += train_acc_batch
		        train_batches += 1

		    # And a full pass over the validation data:
		    val_acc = 0
		    val_batches = 0
		    for batch in iterate_minibatches(X_val, y_val, batch_size):
		        inputs, targets = batch
		        val_acc += accuracy_fun(inputs, targets)
		        val_batches += 1

		    
		    # Then we print the results for this epoch:
		    print("Epoch {} of {} took {:.3f}s".format(
		        epoch + 1, num_epochs, time.time() - start_time))

		    print("  training loss (in-iteration):\t\t{:.6f}".format(train_err / train_batches))
		    print("  train accuracy:\t\t{:.2f} %".format(
		        train_acc / train_batches * 100))
		    print("  validation accuracy:\t\t{:.2f} %".format(
		        val_acc / val_batches * 100))

		return pred_fun, accuracy_fun



	X_train,y_train,X_val,y_val,X_test,y_test = load_dataset()
	return Ner(X_train, y_train, batch_size = batchsize, num_epochs = epochs)
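A hypothetical way to use the two functions returned by NerNet, assuming the test split comes from the same load_dataset call used inside it and is already shaped (N, 1, 28, 28) with int32 labels:

pred_fun, accuracy_fun = NerNet(batchsize=50, epochs=10)
_, _, _, _, X_test, y_test = load_dataset()
print(pred_fun(X_test[:32]))                    # softmax class probabilities
print(accuracy_fun(X_test[:32], y_test[:32]))   # mean accuracy on the batch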
Example #4
def init_data(self):
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
    self.train_X = X_train
    self.train_y = y_train
    self.test_X = X_test
    self.test_y = y_test
    self.val_X = X_val
    self.val_y = y_val
Example #5
def main():
    # Load dataset
    train_x, train_y = mnist.load_dataset(download=True, train=True)
    test_x, test_y = mnist.load_dataset(download=True, train=False)

    # Train
    w1, b1, w2, b2, loss_history = train(train_x, train_y)

    # Display loss curve
    plt.plot(loss_history, label="Training Loss")
    plt.legend()
    plt.show()

    # Test
    test(test_x, test_y, w1, b1, w2, b2)

    # A little visualization
    visualization(test_x, test_y, w1, b1, w2, b2)
    plt.show()
Example #6
def train_model(config: Union[
    str, AsciiLetterConfig] = AsciiLetterConfig()) -> str:
    """Train the model and save classifier and feature weights."""
    if isinstance(config, str):
        config = AsciiLetterConfig.from_yaml(config)
    files = AsciiLetterFiles(config)
    x_train, y_train = load_dataset(files.train_dataset, config)
    x_test, y_test = load_dataset(files.test_dataset, config)
    model = ascii_letter_classifier(config)
    model.fit(
        x_train,
        y_train,
        validation_data=(x_test, y_test),
        verbose=config.verbose,
        epochs=config.n_epochs,
        steps_per_epoch=config.steps_per_epoch,
        validation_steps=config.validation_steps,
    )
    model.save_weights(files.model_weights, overwrite=True)
    return 'OK'
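train_model accepts either a ready config object or a path to a YAML file; two hypothetical invocations (the file name is an assumption):

train_model()                            # use the default AsciiLetterConfig()
train_model("ascii_letter_config.yaml")  # parsed via AsciiLetterConfig.from_yaml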
Example #7
def get_base_data(half_n_samples, alpha0=0):
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
    all_data = np.vstack((X_train, X_val, X_test))
    all_data = all_data.reshape(-1, 28, 28)
    subsample_indices = np.random.choice(len(all_data),
                                         half_n_samples,
                                         replace=False)

    all_data = all_data[subsample_indices]

    if alpha0 != 0:
        all_data = rotate_dataset(all_data, alpha0)
    return all_data
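A hypothetical call that pools the three splits, subsamples 5000 images, and rotates them by 30 degrees:

samples = get_base_data(5000, alpha0=30)   # array of shape (5000, 28, 28)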
Example #8
def main(argv):
    global queue_adt, queue_pdt, sess, X_train, sfx
    options = handle_options('queue', argv)

    mnist_data = load_dataset()
    X_train = mnist_data[0].reshape(-1, 28, 28, 1)
    sfx = gen_sfx_key(('adt', 'nblocks', 'block_size'), options)

    empty_queue_args = {'initializer': tf.random_uniform_initializer}
    queue_adt, queue_pdt = gen_queue_adt(X_train,
                                         options,
                                         enqueue_args=options,
                                         nitems=options['nitems'],
                                         dequeue_args=options,
                                         empty_queue_args=empty_queue_args,
                                         batch_size=options['batch_size'])

    savedir = mk_dir(sfx)
    sess = load_train_save(options, queue_adt, queue_pdt, sfx, savedir)
Example #9
# Parse the arguments and perform some clean up dropping out the None values
args = {k: v for k, v in vars(parser.parse_args()).iteritems() if v}
print(args)

################################################################


def iterate_minibatches(inputs, targets, batchsize):
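    # Shuffle once per call; trailing samples that do not fill a whole batch are dropped.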
    p = np.random.permutation(inputs.shape[0])
    for idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
        yield (inputs[p])[idx:idx + batchsize], (targets[p])[idx:idx +
                                                             batchsize]


print("Loading the data")
X_train, y_train, X_test, y_test = mnist.load_dataset(args['nbtrain'],
                                                      args['nbtest'], False)

############################################################################
# Model building

print("Model architecture : ")
filename_prefix = ""
if ('logreg' in args):
    print("    Logistic regression, Input -> Softmax")
    model = LogisticRegression.Model(28, 28, 1, 10)
    filename_prefix += "logreg_"
elif 'mlp' in args:
    print("    Multilayer perceptron with : ")
    filename_prefix += "mlp_"
    sys.exit()
elif 'cnn' in args:
Example #10
        return F.softmax_cross_entropy(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        accuracy = F.accuracy(y, t)
        return accuracy

    def gradient(self, x, t):
        loss = self.loss(x, t)
        self.net.cleargrads()
        loss.backward()
        return loss.grad


if __name__ == "__main__":
    dataset = mnist.load_dataset()
    x_train = dataset['train_img']
    t_train = dataset['train_label']

    x_test = dataset['test_img']
    t_test = dataset['test_label']

    network = TwoLayerNet(784, 50, 10)

    iters_num = 10000
    train_size = x_train.shape[0]

    batch_size = 100
    learning_rate = 0.1

    optimizer = chainer.optimizers.SGD(lr=learning_rate)
Example #11
            # Then we print the results for this epoch:
            print("for {}".format(res_name))
            print("Epoch {} of {} took {:.3f}s".format(
                epoch + 1, num_epochs,
                time.time() - start_time))

            print("  training loss (in-iteration):\t\t{:.6f}".format(
                train_err / train_batches))
            print("  train accuracy:\t\t{:.2f} %".format(train_acc /
                                                         train_batches * 100))
            print("  validation accuracy:\t\t{:.2f} %".format(
                val_acc / val_batches * 100))
            res["train_err"].append(train_err / train_batches)
            res["train_acc"].append(train_acc / train_batches * 100)
            res["val_acc"].append(val_acc / val_batches * 100)
    # Just profile if you need
    pr.disable()
    pr.print_stats(sort='cumtime')
    for res in results.values():
        res.pop('train_fun')
        res.pop('accuracy_fun')
    with open("comparative_history.dict", 'wb') as pickle_file:
        pickle.dump(results, pickle_file)


if __name__ == "__main__":
    from mnist import load_dataset
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
    print(X_train.shape, y_train.shape)

    run(X_train, y_train, X_val, y_val, X_test, y_test)
Example #12
import logging
import os
import sys

import numpy as np

from neuralnetwork.network import load_from_file
from mnist import load_dataset, result_to_digit

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("digits_recognizer")

with open("digits_recognizer_setup.txt", "r") as f:
    neural_network = load_from_file(f)
logger.info("Testing")
logger.info("Loading testing dateset")
testing_data = load_dataset('./datasets/mnist/mnist_test.csv')
logger.info("Testing dateset loaded")

logger.info("Testing network")
good = 0
bad = 0
case_index = 0
for sample in testing_data:
    result = neural_network.feedforward(sample.input)
    expected_result = result_to_digit(sample.output)
    actual_result = result_to_digit(result)
    print("Case %d: %d %d %d" % (case_index, actual_result, expected_result,
                                 actual_result == expected_result))
    if actual_result == expected_result:
        good += 1
    else:
Example #13
            pad='same',
            stride=(1, 1))
        n_params += lasagne.layers.count_params(l_prev)
        l_prev = lasagne.layers.ConcatLayer([l_unit, l_prev])
        l_prev = lasagne.layers.NonlinearityLayer(
            l_prev, nonlinearity=lasagne.nonlinearities.rectify)

    l_out = lasagne.layers.DenseLayer(
        l_prev, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)
    n_params += lasagne.layers.count_params(l_out)
    print("%i parameters to find." % n_params)
    return input_var, l_out


print("Loading the data")
X_train, y_train, X_test, y_test = mnist.load_dataset(nb_training_samples,
                                                      nb_test_samples, False)

print("Building the model")
input_var, model = build_model((16, ))
prediction = lasagne.layers.get_output(model)

# Define the Negative Log Likelihood loss
target_var = T.ivector('targets')
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()

l2 = lasagne.regularization.regularize_network_params(
    model, lasagne.regularization.l2)
loss += weight_decay * l2

# Define the update of the weights
Example #14
        l_unit = lasagne.layers.NonlinearityLayer(l_prev, nonlinearity=lasagne.nonlinearities.identity)
        l_prev = lasagne.layers.Conv2DLayer(l_prev, k, 3, nonlinearity=lasagne.nonlinearities.rectify, pad='same', stride=(1,1))
        n_params += lasagne.layers.count_params(l_prev)
        l_prev = lasagne.layers.Conv2DLayer(l_prev, k, 3, nonlinearity=lasagne.nonlinearities.identity, pad='same', stride=(1,1))
        n_params += lasagne.layers.count_params(l_prev)
        l_prev = lasagne.layers.ConcatLayer([l_unit, l_prev])
        l_prev = lasagne.layers.NonlinearityLayer(l_prev, nonlinearity=lasagne.nonlinearities.rectify)

    l_out = lasagne.layers.DenseLayer(l_prev, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)
    n_params += lasagne.layers.count_params(l_out)
    print("%i parameters to find."% n_params)
    return input_var, l_out


print("Loading the data")
X_train, y_train, X_test, y_test = mnist.load_dataset(nb_training_samples, nb_test_samples, False)

print("Building the model")
input_var, model = build_model((16,))
prediction = lasagne.layers.get_output(model)

# Define the Negative Log Likelihood loss
target_var = T.ivector('targets')
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()

l2 = lasagne.regularization.regularize_network_params(model, lasagne.regularization.l2)
loss += weight_decay * l2

# Define the update of the weights
params = lasagne.layers.get_all_params(model, trainable=True)
Example #15
def main_new(num_epochs=500, num_train=0, use_existing=False, rotate_angle=0):
    # Load the dataset
    batch_size=100
    thresh=.9
    eta_init=np.float32(.001)
    print("Loading data...")
    X_train_in, y_train_in, X_val_in, y_val_in, X_test_in, y_test_in = mnist.load_dataset()
    if (rotate_angle>0):
        X_train_in=mnist.rotate_dataset(X_train_in,angle=rotate_angle)
        X_val_in=mnist.rotate_dataset(X_val_in,angle=rotate_angle)
        X_test_in=mnist.rotate_dataset(X_test_in,angle=rotate_angle)

    if (num_train==0):
        num_train=np.shape(y_train_in)[0]
    #X_train_r=rotate_dataset(X_train,12,num_train)
    #X_val_r=rotate_dataset(X_val,12,np.shape(X_val)[0])
    X_train,  X_train_c, y_train=compare_net.create_paired_data_set(X_train_in, y_train_in, num_train)
    X_val, X_val_c, y_val = compare_net.create_paired_data_set(X_val_in, y_val_in, num_train)
    X_test, X_test_c, y_test = compare_net.create_paired_data_set(X_test_in, y_test_in, num_train)
    X_test1, X_test_f, y_test_f, y_label = compare_net.create_paired_data_set_with_fonts(X_test_in, y_test_in, 10000)

    # Prepare Theano variables for inputs and targets
    input_var1 =  T.tensor4('inputs')
    input_var2 = T.tensor4('inputs_comp')
    target_var = T.fvector('target')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")

    network = compare_net.build_cnn_new_conv(input_var1, input_var2)
    if (os.path.isfile('net.npy') and use_existing):
        spars=np.load('net.npy')
        lasagne.layers.set_all_param_values(network,spars)
        #layers=lasagne.layers.get_all_layers(network)

    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    corr = lasagne.layers.get_output(network)


    corr=correlation(corr[0,],corr[1,])
    #loss=T.mean(T.square(T.sum(corr,axis=1)-target_var))
    loss=T.mean(T.square(corr-target_var))

    acc = T.mean(T.eq(corr>thresh, target_var),
                      dtype=theano.config.floatX)

    # We could add some weight decay as well here, see lasagne.regularization.
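    # One way to do that (a sketch, penalty coefficient assumed), mirroring the
    # pattern used in Examples #13/#14:
    #   l2_penalty = lasagne.regularization.regularize_network_params(
    #       network, lasagne.regularization.l2)
    #   loss = loss + 1e-4 * l2_penalty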

    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    print(params)

    eta = theano.shared(np.array(eta_init, dtype=theano.config.floatX))
    #eta_decay = np.array(0.95, dtype=theano.config.floatX)
    updates = lasagne.updates.nesterov_momentum(
            loss, params, learning_rate=eta, momentum=0.9)
    #updates = lasagne.updates.sgd(
    #        loss, params, learning_rate=eta)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_corr = lasagne.layers.get_output(network, deterministic=True)
    test_corr=correlation(test_corr[0,], test_corr[1,])
    #test_loss=T.mean(T.square(T.sum(test_corr,axis=1)-target_var))
    test_loss=T.mean(T.square(test_corr-target_var))

    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(test_corr>thresh, target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var1, input_var2, target_var], [loss, acc, corr], updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var1, input_var2, target_var], [test_loss, test_acc, test_corr])

    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    t=1
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_acc = 0
        train_batches = 0
        start_time = time.time()
        print(eta.get_value())
        for batch in iterate_minibatches_new(X_train,X_train_c, y_train, batch_size, shuffle=True):
            inputs1, inputs2, targets = batch
            eta.set_value(eta_init) #/np.float32(t))
            bloss, bacc, bcorr = train_fn(inputs1,inputs2,targets)

            train_err += bloss
            train_acc += bacc
            train_batches += 1
            t=t+1

        # And a full pass over the validation data:

        val_acc=0
        val_err = 0
        val_batches = 0
        for batch in iterate_minibatches_new(X_val,X_val_c, y_val, batch_size, shuffle=False):
            inputs1, inputs2, targets = batch
            err, acc, tcorr = val_fn(inputs1, inputs2, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print(" train accuracy:\t\t{:.6f}".format(train_acc/ train_batches))

        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print(" validation accuracy:\t\t{:.6f}".format(val_acc/ val_batches))

        if (np.mod(epoch,10)==0 and epoch>0):
            params = lasagne.layers.get_all_param_values(network)
            np.save('net',params)

    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0

    for batch in iterate_minibatches_new(X_test, X_test_c, y_test, batch_size, shuffle=False):
        inputs1, inputs2, targets = batch
        err, acc, tcorr = val_fn(inputs1, inputs2, targets)

        test_acc += acc
        test_err += err

        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test acc:\t\t\t{:.6f}".format(test_acc / test_batches))


    try:
        X_test1
    except NameError:
       print "X_test1 not defined"
    else:
        test_err = 0
        test_acc = 0
        test_batches = 0
        corrs=[]
        for batch in iterate_minibatches_new(X_test1, X_test_f, y_test_f, batch_size, shuffle=False):
            inputs1, inputs2, targets = batch
            err, acc, tcorr = val_fn(inputs1, inputs2, targets)

            corrs.append(np.reshape(tcorr,(10,-1)))
            test_acc += acc
            test_err += err
            test_batches += 1

        CORRS=np.vstack(corrs)
        yii=np.argmax(CORRS,axis=1)
        print("Final results classification:")
        print("  test loss font:\t\t\t{:.6f}".format(test_err / test_batches))
        print("  test acc font:\t\t\t{:.6f}".format(np.double(np.sum(yii==y_label)) / len(yii)))
Example #16
parser.add_argument('--epoch', type=int, help='Number of training epochs', default=80)

# Parse the arguments and perform some clean up dropping out the None values
args = {k:v for k, v in vars(parser.parse_args()).iteritems() if v}
print(args)

################################################################


def iterate_minibatches(inputs, targets, batchsize):
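    # Shuffle once per call; trailing samples that do not fill a whole batch are dropped.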
    p = np.random.permutation(inputs.shape[0])
    for idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
        yield (inputs[p])[idx:idx+batchsize],(targets[p])[idx:idx+batchsize]

print("Loading the data")
X_train, y_train, X_test, y_test = mnist.load_dataset(args['nbtrain'], args['nbtest'], False)

############################################################################
# Model building

print("Model architecture : ")
if('logreg' in args):
    print("    Logistic regression, Input -> Softmax")
    model = LogisticRegression.Model(28, 28, 1, 10)
elif 'mlp' in args:
    print("    Multilayer perceptron with : ")
    sys.exit()
elif 'cnn' in args:
    print("    Convolutional neural network with :")
    sys.exit()
elif 'vgg' in args:
Example #17
import logging

import numpy as np

from neuralnetwork.network import NeuralNetwork, serialize_neural_network
from neuralnetwork.training import Trainer
from mnist import load_dataset

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("digits_recognizer")

# Create neural network
neural_network = NeuralNetwork([784, 20, 10])

# Train the network
logger.info("Beginning training")
logger.info("Loading training dataset")
training_data = load_dataset('./datasets/mnist/mnist_train.csv')
logger.info("Training dataset loaded")

logger.info("Training network")
trainer = Trainer()
trainer.train(network=neural_network,
              training_data=training_data,
              learning_step=1,
              batch_iterations=20,
              batch_size=50,
              min_improvement_per_batch=0.000001,
              max_batches_without_improvement=100,
              cost_estimator_batch_size=5000)

logger.info("Network has been trained")
logger.info("Serializing network setup to digits_recognizer_setup.txt")
Example #18
        np.random.shuffle(indices)

        # Yield full batches first; any leftover samples are handled below.
        for start in range(0, n_samples - n_samples % batchsize, batchsize):
            end = start + batchsize

            batch_idx = indices[start:end]

            yield X[batch_idx], y[batch_idx]
        if n_samples % batchsize != 0:
            batch_idx = indices[n_samples - n_samples % batchsize :]
            yield X[batch_idx], y[batch_idx]


if __name__ == "__main__":
    from mnist import load_dataset
    X_train,y_train,X_val,y_val,X_test,y_test = load_dataset()
    print(X_train.shape,y_train.shape)

    input_X = T.tensor4("X")
    input_shape = [None,1,28,28]
    target_y = T.vector("target Y integer", dtype='int32')

    # input layer (auxiliary)
    input_layer = lasagne.layers.InputLayer(shape = input_shape,input_var=input_X)

    # a dense (dot-product) layer that takes input_layer as its input;
    # layers can be given names, but it is optional
    dense_1 = DotLayer(input_layer,
                       num_units=10,
                       name = "output")
Example #19
def make_seqs(slength=2, num_seqs=20, from_font=False):

#def make_seqs(slength=4):
    imheight=48
    imwidth=48
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = mnist.load_dataset()
    if (from_font):
        Xfont=crop.get_fonts()
    begin=0
    incr=30
    lrange=3
    lrange2=lrange*2+1
    labels=np.floor(np.random.rand(num_seqs,slength,2)*10)
    TEST1=[]
    TEST1a=[]
    TEST2=[]
    for s in range(num_seqs):
        begin1=begin1a=begin2=np.int32(0)
        ii1=labels[s,:,0]
        ii2=labels[s,:,1]
        test1=np.zeros((imheight,imwidth*slength))
        test1a=np.zeros((imheight,imwidth*slength))
        test2=np.zeros((imheight,imwidth*slength))
        if (np.max(np.abs(ii1-ii2))>0):
            for k in range(slength):
                jj1=np.where(y_val==ii1[k])[0]
                jj2=np.where(y_val==ii2[k])[0]
                r1s=np.int32(np.floor(np.random.rand(2)*np.double(len(jj1))))
                r2=np.int32(np.floor(np.random.rand()*np.double(len(jj2))))
                sample1=scipy.misc.imresize(np.squeeze(X_val[jj1[r1s[0]],]),(imheight,imwidth))
                if (not from_font):
                    sample1a=np.squeeze(X_val[jj1[r1s[1]],])
                else:
                    sample1a=Xfont[np.int32(ii1[k]),]

                sample2=np.squeeze(X_val[jj2[r2],])
                sample1a=scipy.misc.imresize(sample1a,(imheight,imwidth))
                sample2=scipy.misc.imresize(sample2,(imheight,imwidth))
                test1[:,begin1:begin1+sample1.shape[1]]=np.maximum(sample1,test1[:,begin1:begin1+sample1.shape[1]])
                test1a[:,begin1a:begin1a+sample1.shape[1]]=np.maximum(sample1a,test1a[:,begin1a:begin1a+sample1.shape[1]])
                test2[:,begin2:begin2+sample1.shape[1]]=np.maximum(sample2,test2[:,begin2:begin2+sample1.shape[1]])
                begin1+=np.int32(np.floor(np.random.rand()*lrange2)-lrange+incr)
                begin1a+=np.int32(np.floor(np.random.rand()*lrange2)-lrange+incr)
                begin2+=np.int32(np.floor(np.random.rand()*lrange2)-lrange+incr)
        TEST1.append(test1)
        TEST1a.append(test1a)
        TEST2.append(test2)

    import pylab as py
    ii=list(range(num_seqs))
    np.random.shuffle(ii)
    for i in range(5):
        py.figure(num=1,figsize=(12,2),dpi=80)
        py.subplot(131)
        py.imshow(TEST1[ii[i]],aspect='equal')
        py.axis('off')
        py.subplot(132)
        py.imshow(TEST1a[ii[i]],aspect='equal')
        py.axis('off')
        py.subplot(133)
        py.imshow(TEST2[ii[i]],aspect='equal')
        py.axis('off')
        py.show()

    TEST1=np.float32(np.expand_dims(np.array(TEST1),1))
    TEST1a=np.float32(np.expand_dims(np.array(TEST1a),1))
    TEST2=np.float32(np.expand_dims(np.array(TEST2),1))

    return TEST1,TEST1a, TEST2
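A hypothetical call; each returned array stacks the generated sequence images as float32 with shape (num_seqs, 1, 48, 48 * slength):

TEST1, TEST1a, TEST2 = make_seqs(slength=2, num_seqs=20, from_font=False)
print(TEST1.shape)   # (20, 1, 48, 96)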