import sys

import numpy as np


def train_model(m, nsamples=10000, n_hidden=128, lr=0.01, nepochs=100, val_freq=1):
    INVERT = False
    DIGITS = 3
    MAXLEN = DIGITS + 1 + DIGITS    
    chars = '0123456789+ '
    ctable = CharacterTable(chars, MAXLEN)
    X_train, X_val, y_train, y_val = generate_train_data(nsamples) 

    for epoch in range(nepochs):
        nlls = []
        for i, (x, y) in enumerate(zip(X_train, y_train)):
            sentence_enc, sentence_dec, target = preprocess(x, y)
            nlls.append(m.train(sentence_enc, sentence_dec, target, lr))
            print("%.2f %% completed - nll = %.2f\r"
                  % ((i + 1) * 100. / len(X_train), np.mean(nlls)), end="")
            sys.stdout.flush()
        print()

        # evaluation
        if (epoch + 1) % val_freq == 0: 
            print ("Epoch %d" % epoch)
            for i, (x, y) in enumerate(zip(X_val, y_val)):
                sentence_enc, sentence_dec, target = preprocess(x, y)
                y_pred = m.generate_text(sentence_enc)
                try:
                    print("Sample : x = '%s'  y = '%s'" % (ctable.decode(x,False),ctable.decode(y,False)))
                    print ("ground-truth\t", np.concatenate([[sentence_dec[1]], target[:-1]]))
                    print ("predicted   \t", y_pred)
                except IndexError:
                    pass
                if i > 5:
                    break
def main(nsamples=10000,
         n_hidden=128,
         lr=0.01,
         nepochs=100,
         batch_size=64,
         val_freq=1):

    INVERT = False
    DIGITS = 3
    MAXLEN = DIGITS + 1 + DIGITS
    chars = '0123456789+ '
    n_classes = len('0123456789') + 1 # add <eos>
    voc_size = len('0123456789+') + 1 # add <bos> for the decoder 

    # generate the dataset
    ctable = CharacterTable(chars, MAXLEN)
    X_train, X_val, y_train, y_val = generate_train_data(nsamples) 

    # build the model
    m = model(nh=n_hidden,
              nc=n_classes, 
              ne=voc_size,
              batch_size=batch_size,
              natt=20)

    b_sentence_enc = np.zeros((batch_size, MAXLEN), dtype='int32')
    b_sentence_dec = np.zeros((batch_size, DIGITS + 2), dtype='int32')
    b_target = np.zeros((batch_size, DIGITS + 2), dtype='int32')
    print(m.debug(b_sentence_enc, b_sentence_dec, b_target))
    # training
    for epoch in range(nepochs):
        nlls = []
        for batch_num in range(len(X_train) // batch_size):
            b_sentence_enc = np.zeros((batch_size, MAXLEN), dtype='int32')
            b_sentence_dec = np.zeros((batch_size, DIGITS + 2), dtype='int32')  # <bos> + answer
            b_target = np.zeros((batch_size, DIGITS + 2), dtype='int32')        # answer + <eos>
            for i in range(batch_size):
                x, y = X_train[batch_num * batch_size + i], y_train[batch_num * batch_size + i]
                sentence_enc, sentence_dec, target = preprocess(x, y)
                b_sentence_enc[i, :] = sentence_enc
                b_sentence_dec[i, :] = sentence_dec
                b_target[i, :] = target
            # train on the batch once it is fully assembled
            nlls.append(m.train(b_sentence_enc, b_sentence_dec, b_target, lr))

            print("%.2f %% completed - nll = %.2f\r"
                  % ((batch_num + 1) * batch_size * 100. / len(X_train), np.mean(nlls)), end="")
            sys.stdout.flush()
        print()

        # evaluation
        if (epoch + 1) % val_freq == 0: 
            for i, (x, y) in enumerate(zip(X_val, y_val)):
                sentence_enc, sentence_dec, target = preprocess(x, y)
                y_pred = m.generate_text(sentence_enc)
                try:
                    print "ground-truth\t", np.concatenate([[sentence_dec[1]], target[:-1]])
                    print "predicted   \t", y_pred
                except IndexError:
                    pass
                if i > 5:
                    break
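
None of the examples here define the `CharacterTable` helper they construct. A minimal sketch consistent with how it is called (built from `'0123456789+ '` and MAXLEN, with `ctable.decode(x, False)` mapping integer ids back to characters) could look like the following; the method names mirror the classic Keras addition example, and everything else is an assumption:

import numpy as np


class CharacterTable:
    """Map characters to integer ids and back (hypothetical sketch)."""

    def __init__(self, chars, maxlen):
        self.chars = list(chars)  # keep the given order so digits map to ids 0-9
        self.maxlen = maxlen
        self.char_indices = {c: i for i, c in enumerate(self.chars)}
        self.indices_char = {i: c for i, c in enumerate(self.chars)}

    def encode(self, sentence):
        # pad with spaces to maxlen, then map each character to its id
        padded = sentence.ljust(self.maxlen)
        return np.array([self.char_indices[c] for c in padded], dtype='int32')

    def decode(self, idxs, calc_argmax=True):
        # with calc_argmax=False (as used above), idxs is already a vector of ids
        if calc_argmax:
            idxs = np.asarray(idxs).argmax(axis=-1)
        return ''.join(self.indices_char[int(i)] for i in idxs)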
Example #3
def main(nsamples=100,
         dim_embedding=15,
         n_hidden=128,
         lr=0.1,
         nepochs=10):

    INVERT = False
    DIGITS = 3
    MAXLEN = DIGITS + 1 + DIGITS
    chars = '0123456789+ '
    n_classes = len('0123456789') + 1 # add <eos>
    voc_size = len('0123456789+') + 1 # add <bos> for the decoder 

    # generate the dataset
    ctable = CharacterTable(chars, MAXLEN)
    X_train, X_val, y_train, y_val = generate_train_data(nsamples) 

    # build the model
    m = model(nh=n_hidden,
              nc=n_classes, 
              ne=voc_size, 
              de=dim_embedding)

    # training
    for epoch in range(nepochs):
        for i, (x, y) in enumerate(zip(X_train, y_train)):
            sentence_enc, sentence_dec, target = preprocess(x, y)
            m.train(sentence_enc, sentence_dec, target, lr)
            print "%.2f %% completed\r" % ((i + 1) * 100. / len(X_train)), 
            sys.stdout.flush()
        print()

    # NOTE: data_idxs_enc, batch_size and max_generation_length are not defined
    # in this example; they are assumed to be provided elsewhere
    m.generate_text(data_idxs_enc, batch_size, max_generation_length)
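
`generate_train_data` is likewise not shown. Given DIGITS = 3 and MAXLEN = 7, it presumably builds random padded "a+b" questions and their sums, already encoded as integer ids (since `ctable.decode(x, False)` is applied to its outputs). A hypothetical sketch using the CharacterTable sketched above, with the 90/10 train/validation split being an assumption:

import numpy as np


def generate_train_data(nsamples, digits=3, val_fraction=0.1):
    # hypothetical sketch: random "a+b" questions padded to MAXLEN and
    # encoded to integer ids; answers padded to DIGITS + 1 characters
    # (a sum of two 3-digit numbers has at most 4 digits)
    maxlen = digits + 1 + digits
    ctable = CharacterTable('0123456789+ ', maxlen)
    X, y = [], []
    for _ in range(nsamples):
        a = np.random.randint(10 ** digits)
        b = np.random.randint(10 ** digits)
        X.append(ctable.encode('%d+%d' % (a, b)))
        y.append(ctable.encode(str(a + b))[:digits + 1])
    split = int(nsamples * (1 - val_fraction))
    return X[:split], X[split:], y[:split], y[split:]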
Example #5
def main(nsamples=10000,
         dim_embedding=15,
         n_hidden=128,
         lr=0.01,
         nepochs=100,
         val_freq=1):

    INVERT = False
    DIGITS = 3
    MAXLEN = DIGITS + 1 + DIGITS
    chars = '0123456789+ '
    n_classes = len('0123456789') + 1 # add <eos>
    voc_size = len('0123456789+') + 1 # add <bos> for the decoder 

    # generate the dataset
    ctable = CharacterTable(chars, MAXLEN)
    X_train, X_val, y_train, y_val = generate_train_data(nsamples) 

    # build the model
    m = model(nh=n_hidden,
              nc=n_classes, 
              ne=voc_size, 
              de=dim_embedding)

    # training
    for epoch in range(nepochs):
        nlls = []
        for i, (x, y) in enumerate(zip(X_train, y_train)):
            sentence_enc, sentence_dec, target = preprocess(x, y)
            nlls.append(m.train(sentence_enc, sentence_dec, target, lr))
            print("%.2f %% completed - nll = %.2f\r"
                  % ((i + 1) * 100. / len(X_train), np.mean(nlls)), end="")
            sys.stdout.flush()
        print()
        # evaluation
        if (epoch + 1) % val_freq == 0: 
            print ("Epoch %d",epoch)
            for i, (x, y) in enumerate(zip(X_val, y_val)):
                sentence_enc, sentence_dec, target = preprocess(x, y)
                y_pred = m.generate_text(sentence_enc)
                try:
                    print ("ground-truth\t", np.concatenate([[sentence_dec[1]], target[:-1]]))
                    print ("predicted   \t", y_pred)
                except IndexError:
                    pass
                if i > 5:
                    break
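
The remaining unshown helper is `preprocess`. The vocabulary comments (`n_classes` adds <eos>, `voc_size` adds <bos>) and the DIGITS + 2 batch buffers in the batched example suggest it prepends <bos> to the decoder input and appends <eos> to the target for teacher forcing; the concrete token ids below are assumptions:

import numpy as np

EOS = 10  # assumed id of the <eos> class (n_classes = 10 + 1)
BOS = 11  # assumed id of the <bos> token (voc_size = 11 + 1)


def preprocess(x, y):
    # hypothetical sketch: encoder input unchanged; decoder input gets a
    # leading <bos>, target gets a trailing <eos> (teacher forcing)
    sentence_enc = np.asarray(x, dtype='int32')
    y = np.asarray(y, dtype='int32')
    sentence_dec = np.concatenate([[BOS], y]).astype('int32')
    target = np.concatenate([y, [EOS]]).astype('int32')
    return sentence_enc, sentence_dec, target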
    # output layer (fragment: the earlier layers of this Keras ColorNet model are not shown)
    X = Flatten()(X)
    X = Dense(40, activation='relu', name='fc40')(X)
    X = Dropout(rate=0.5)(X)

    X = Dense(channels, activation=None, name='fc' + str(channels))(X)

    # create the model
    color_model = Model(inputs=X_input, outputs=X, name='ColorNet')

    return color_model

# X_train = np.load('X_train.npy'); Y_train = np.load('Y_train.npy')
# X_test = np.load('X_test.npy'); Y_test = np.load('Y_test.npy')

X_train, Y_train, _ = generate_train_data(train_size=9000, set_name='Shi-Gehler', patch_size=(64, 64))
X_test, Y_test, _ = generate_test_data(test_size=2420, set_name='Shi-Gehler', patch_size=(64, 64))

# note: this optimizer is defined but unused; the model is compiled with 'Adam' below
rmsprop = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)

cc_model = ColorNet(input_shape=X_train.shape[1:4])
cc_model.compile(optimizer='Adam', loss='cosine_proximity', metrics=['acc'])

estimate = cc_model.fit(X_train, Y_train, validation_split=0.3333, epochs=20, batch_size=160)

preds = cc_model.evaluate(X_test, Y_test)
print()
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))

# serialize model to JSON
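The comment above announces a serialization step that is cut off here. With the standard Keras `to_json`/`save_weights` API it would typically continue along these lines (file names are placeholders):

model_json = cc_model.to_json()
with open('cc_model.json', 'w') as json_file:
    json_file.write(model_json)
# weights are stored separately, in HDF5 rather than JSON
cc_model.save_weights('cc_model_weights.h5')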