Example #1
def test_mask_loss_network():
    model = Sequential()
    model.add(Dense(16*16, input_dim=16*16))
    model.add(Reshape((1, 16, 16)))
    net_out = model.get_output()

    net_in = model.get_input()
    th_mask = T.tensor4()
    loss = mask_loss(th_mask, net_out)['loss']
    updates = Adam().get_updates(model.params, model.constraints, loss)
    train_fn = theano.function([th_mask, net_in], [loss], updates=updates)

    first_loss = 0

    epochs = 30
    nb_batches = 10
    for i, mask_idx in enumerate(itertools.islice(masks(64*nb_batches, scales=[0.25]), epochs)):
        z = np.random.uniform(low=-1, high=1, size=mask_idx.shape
                              ).reshape((-1, 16*16)).astype(np.float32)
        loss = train_fn(mask_idx, z)
        # print(loss)
        if i == 0:
            first_loss = loss

    assert first_loss > loss
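
This test hand-builds a Theano training step from a Keras 0.x graph. A minimal sketch of just that pattern, mirroring the calls used above (the MSE loss is a stand-in for the project-specific mask_loss):

import theano
import theano.tensor as T
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import Adam

model = Sequential()
model.add(Dense(8, input_dim=8))

y_true = T.matrix()                    # placeholder for the target
y_pred = model.get_output()            # symbolic model output
loss = T.mean(T.sqr(y_true - y_pred))  # stand-in for mask_loss
updates = Adam().get_updates(model.params, model.constraints, loss)
train_fn = theano.function([y_true, model.get_input()], loss, updates=updates)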
Example #2
    def __init__(self):
        left = Sequential()
        left.add(Dense(784, 50))
        left.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, left], mode='sum'))

        model.add(Dense(50, 10))
        model.add(Activation('softmax'))
        pdb.set_trace()

        model = Sequential()

        left = Sequential()
        num_kernel = 32
        l1_penalty = 0.0001
        b_mode = 'full'
        left.add(Convolution2D(num_kernel, 3, 2, 2, W_regularizer=l1(l1_penalty), border_mode=b_mode))
        left.add(Convolution2D(num_kernel, num_kernel, 2, 2, W_regularizer=l1(l1_penalty), border_mode=b_mode))
        left.add(LeakyReLU(0.1))
        #left.add(Activation('relu'))
        left.add(MaxPooling2D(poolsize=(2, 2)))
        #left.add(Convolution2D(num_kernel, 3, 2, 2, W_regularizer=l1(l1_penalty), border_mode=b_mode))
        #left.add(Convolution2D(num_kernel, num_kernel, 2, 2, W_regularizer=l1(l1_penalty), border_mode=b_mode))
        #left.add(LeakyReLU(0.1))
        ##left.add(Activation('relu'))
        #left.add(MaxPooling2D(poolsize=(2, 2)))

        model.add(Merge([left, left], mode='sum'))
        pdb.set_trace()
        self.f = theano.function(model.get_input(), model.get_output())
Example #3
class TestRecursive(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestRecursive, self).__init__(*args, **kwargs)
        self.input_dim = 2
        self.state_dim = 2
        self.model = Recursive(return_sequences=True)
        self.model.add_input('input', ndim=3)  # Input is 3D tensor
        self.model.add_state('h', dim=self.state_dim)
        self.model.add_node(Dense(self.input_dim + self.state_dim,
                                  self.state_dim,
                                  init='one'),
                            name='rec',
                            inputs=['input', 'h'],
                            return_state='h')
        self.model.add_node(Activation('linear'),
                            name='out',
                            input='rec',
                            create_output=True)

        self.model2 = Sequential()
        self.model2.add(
            SimpleRNN(input_dim=self.input_dim,
                      activation='linear',
                      inner_init='one',
                      output_dim=self.state_dim,
                      init='one',
                      return_sequences=True))

    def test_step(self):
        XX = T.matrix()
        HH = T.matrix()
        A = self.model._step(XX, HH)
        F = function([XX, HH], A)
        x = np.ones((1, 2))
        h = np.ones((1, 2))
        y = F(x, h)
        r = np.asarray([[4., 4.]])  # Dense with init='one': each unit sums the four concatenated ones
        assert_allclose([r, r], y)

    def test_get_get_output(self):
        X = self.model.get_input()
        Y = self.model._get_output()
        F = function([X], Y, allow_input_downcast=True)

        x = np.ones((3, 5, self.input_dim))
        y = F(x)
        print(y)

        X2 = self.model2.get_input()
        Y2 = self.model2.get_output()
        F2 = function([X2], Y2)
        y2 = F2(x)

        assert_allclose(y2, y[1])
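
For intuition, the all-ones linear recurrence both models should implement can be unrolled by hand; a NumPy sketch (not part of the test, bias terms assumed zero):

import numpy as np

# h_t = x_t.W + h_{t-1}.U with all-ones W and U, matching init='one' above
W = np.ones((2, 2))
U = np.ones((2, 2))
x = np.ones((5, 2))  # one sequence of five timesteps
h = np.zeros(2)
for t in range(5):
    h = x[t].dot(W) + h.dot(U)
    print(h)  # [2. 2.], [6. 6.], [14. 14.], [30. 30.], [62. 62.]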
Example #4
class TestOrthoRNN(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestOrthoRNN, self).__init__(*args, **kwargs)
        self.input_dim = 2
        self.state_dim = 2
        self.model = Recursive(return_sequences=True)
        self.model.add_input('input', ndim=3)  # Input is 3D tensor
        self.model.add_state('h', dim=self.state_dim)
        self.model.add_node(Dense(self.input_dim, self.state_dim,
                                  init='one'), name='i2h',
                            inputs=['input', ])
        self.model.add_node(Dense(self.state_dim, self.state_dim,
                                  init='orthogonal'), name='h2h',
                            inputs=['h', ])
        self.model.add_node(Lambda(lambda x: x), name='rec',
                            inputs=['i2h', 'h2h'], merge_mode='sum',
                            return_state='h',
                            create_output=True)

        self.model2 = Sequential()
        self.model2.add(SimpleRNN(input_dim=self.input_dim, activation='linear',
                                  inner_init='one',
                                  output_dim=self.state_dim, init='one',
                                  return_sequences=True))
        # copy the orthogonal recurrent matrix into the reference RNN so both recurrences match
        U = self.model.nodes['h2h'].W.get_value()
        self.model2.layers[0].U.set_value(U)

    def test_step(self):
        XX = T.matrix()
        HH = T.matrix()
        A = self.model._step(XX, HH)
        F = function([XX, HH], A, on_unused_input='warn')
        x = np.ones((1, 2))
        h = np.ones((1, 2))
        y = F(x, h)
        assert y[-1].shape == (1, 2)

    def test_get_get_output(self):
        X = self.model.get_input()
        Y = self.model._get_output()
        F = function([X], Y, allow_input_downcast=True)

        x = np.ones((3, 5, self.input_dim))
        y = F(x)
        print(y)

        X2 = self.model2.get_input()
        Y2 = self.model2.get_output()
        F2 = function([X2], Y2)
        y2 = F2(x)

        assert_allclose(y2, y[-1])
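
As an aside, the 'orthogonal' initializer requested for h2h is easy to sanity-check in isolation. A plain NumPy sketch (QR of a Gaussian matrix is one standard construction; Keras's own initializer may differ in detail):

import numpy as np

rng = np.random.RandomState(0)
U, _ = np.linalg.qr(rng.normal(size=(2, 2)))
# An orthogonal recurrent matrix satisfies U^T U = I, so it preserves norms.
np.testing.assert_allclose(U.T.dot(U), np.eye(2), atol=1e-6)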
Example #5
class TestRecursive(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestRecursive, self).__init__(*args, **kwargs)
        self.input_dim = 2
        self.state_dim = 2
        self.model = Recursive(return_sequences=True)
        self.model.add_input('input', ndim=3)  # Input is 3D tensor
        self.model.add_state('h', dim=self.state_dim)
        self.model.add_node(Dense(self.input_dim + self.state_dim, self.state_dim,
                                  init='one'), name='rec',
                            inputs=['input', 'h'],
                            return_state='h')
        self.model.add_node(Activation('linear'), name='out', input='rec',
                            create_output=True)

        self.model2 = Sequential()
        self.model2.add(SimpleRNN(input_dim=self.input_dim, activation='linear',
                                  inner_init='one',
                                  output_dim=self.state_dim, init='one',
                                  return_sequences=True))

    def test_step(self):
        XX = T.matrix()
        HH = T.matrix()
        A = self.model._step(XX, HH)
        F = function([XX, HH], A, allow_input_downcast=True)
        x = np.ones((1, 2))
        h = np.ones((1, 2))
        y = F(x, h)
        r = np.asarray([[4., 4.]])  # Dense with init='one': each unit sums the four concatenated ones
        assert_allclose([r, r], y)

    def test_get_get_output(self):
        X = self.model.get_input()
        Y = self.model._get_output()
        F = function([X], Y, allow_input_downcast=True)

        x = np.ones((3, 5, self.input_dim)).astype(floatX)
        y = F(x)
        print(y)

        X2 = self.model2.get_input()
        Y2 = self.model2.get_output()
        F2 = function([X2], Y2)
        y2 = F2(x)

        assert_allclose(y2, y[1])
Example #6
    def __init__(self):
        left = Sequential()
        left.add(Dense(784, 50))
        left.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, left], mode='sum'))

        model.add(Dense(50, 10))
        model.add(Activation('softmax'))
        pdb.set_trace()

        model = Sequential()

        left = Sequential()
        num_kernel = 32
        l1_penalty = 0.0001
        b_mode = 'full'
        left.add(
            Convolution2D(num_kernel,
                          3,
                          2,
                          2,
                          W_regularizer=l1(l1_penalty),
                          border_mode=b_mode))
        left.add(
            Convolution2D(num_kernel,
                          num_kernel,
                          2,
                          2,
                          W_regularizer=l1(l1_penalty),
                          border_mode=b_mode))
        left.add(LeakyReLU(0.1))
        #left.add(Activation('relu'))
        left.add(MaxPooling2D(poolsize=(2, 2)))
        #left.add(Convolution2D(num_kernel, 3, 2, 2,  W_regularizer=l1(l1_penalty), border_mode=b_mode))
        #left.add(Convolution2D(num_kernel, num_kernel, 2, 2, W_regularizer=l1(l1_penalty), border_mode=b_mode))
        #left.add(LeakyReLU(0.1))
        ##left.add(Activation('relu'))
        #left.add(MaxPooling2D(poolsize=(2, 2)))

        model.add(Merge([left, left], mode='sum'))
        pdb.set_trace()
        self.f = theano.function(model.get_input(), model.get_output())
Example #7
    # excerpt from inside a training loop (the `for e in ...:` header is not part of the snippet)
    I, V, sw = get_sample(n_bits=input_dim, max_size=20, min_size=1, batch_size=100)

    loss1 = model.train_on_batch(I, V, sample_weight=sw)
    loss2 = lstm.train_on_batch(I, V, sample_weight=sw)

    progbar.add(1, values=[("NTM", loss1), ("LSTM", loss2)])

    if e % 500 == 0:
        print("")
        acc1 = test_model(model, 'ntm.png')
        acc2 = test_model(lstm, 'lstm.png')
        print("NTM  test acc: {}".format(acc1))
        print("LSTM test acc: {}".format(acc2))

##### VISUALIZATION #####
X = model.get_input()
Y = ntm.get_full_output()[0:3]  # (memory over time, read_vectors, write_vectors)
F = function([X], Y, allow_input_downcast=True)

inp, out, sw = get_sample(1, 8, 21, 20)
mem, read, write = F(inp.astype('float32'))
Y = model.predict(inp)

plt.figure(figsize=(15, 12))

plt.subplot(221)
plt.imshow(write[0])
plt.xlabel('memory location')
plt.ylabel('time')
plt.title('write')
Example #8
detector.add(Dropout(.3))
detector.add(Dense(1)) # 1: Yes, it belongs to S, 0: fake!
detector.add(Activation('sigmoid'))

# Fully Connected model

sampler = Sequential()
sampler.add(Dense(dim, input_dim=dim))
sampler.add(lrelu())  # lrelu: presumably a LeakyReLU helper defined elsewhere in the script
sampler.add(Dense(dim))
sampler.add(lrelu())
sampler.add(Dense(mnist_dim))
sampler.add(Activation('sigmoid'))

# This is G itself!!!
sample_fake = theano.function([sampler.get_input()], sampler.get_output())

# We add the detector on top of G, but it won't be adapted with this cost function.
# Here is a dirty hack: Theano shared variables on the GPU are the same for
# `detector` and `detector_no_grad`, so when we adapt `detector`, the values of
# `detector_no_grad` are updated as well -- and this only ever happens through
# the correct gradients.
# Don't you love pointers? Aliasing can be our friend sometimes.
detector.trainable = False
sampler.add(detector)

opt_g = Adam(lr=.001)  # I got better results when the detector's learning rate is faster
sampler.compile(loss='binary_crossentropy', optimizer=opt_g)
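
The aliasing that the comment above relies on is easy to demonstrate in plain Theano; a minimal sketch with hypothetical names, independent of the GAN code:

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.float32(1.0), name='w')  # one buffer on the device

x = T.scalar('x')
f_train = theano.function([x], w * x, updates=[(w, w + 1)])  # adapts w
f_frozen = theano.function([x], w * x)                       # no updates of its own

f_train(2.0)          # w is now 2.0
print(f_frozen(2.0))  # prints 4.0: the "frozen" function sees the updated w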

Example #9
# debug
model.add(Convolution2D(nb_classes, 1, 1, border_mode='valid'))
vc = BatchNormalization()
model.add(vc)
model.add(Flatten())
#model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.load_weights("/data/lisatmp4/sarath/data/output/conv/1/weights.hdf5"
                   )  #/data/lisatmp4/chinna/data/ift6268/temp/1/weights.hdf5")

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

convout = theano.function([model.get_input(train=False)],
                          vc.get_output(train=False))
t0 = time.clock()
[layer_output] = convout(im)
print(layer_output.shape)

dpath = "/data/lisatmp4/chinna/data/ift6268/temp/1/"

for i in range(0, 10):
    convert_to_image(layer_output[i], dpath + str(i) + "old.jpg")
    layer_output[i] = add_gnoise_util(layer_output[i])
    print(max(layer_output[i].flatten()))
    convert_to_image(layer_output[i], dpath + str(i) + ".jpg")
print("Time")
print(time.clock() - t0)
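
`add_gnoise_util` and `convert_to_image` are project helpers that are not shown in this excerpt. A plausible, purely hypothetical reconstruction of `add_gnoise_util` just perturbs a feature map with Gaussian noise:

import numpy as np

def add_gnoise_util(feature_map, sigma=0.02):
    # Hypothetical: add zero-mean Gaussian noise to a single feature map.
    noise = np.random.normal(0.0, sigma, feature_map.shape).astype(feature_map.dtype)
    return feature_map + noise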
Example #10
# (userModel is built earlier in the original script; the excerpt starts with the item tower)
itemModel = Sequential()
itemModel.add(TimeDistributedDense(943, 500))
itemModel.add(Activation('tanh'))
itemModel.add(Dropout(0.4))
itemModel.add(TimeDistributedDense(500, 500))
itemModel.add(Activation('tanh'))
##itemModel.add(Reshape(4))
##itemModel.add(Dense(4, 2))
model = Sequential()
model.add(MaxDot([userModel, itemModel]))  # should output 2 values
#model.add(TimeDistributedDense(300, 1))
##model.add(Activation('normalization'))
model.add(Reshape(2))
y_score = model.get_output(train=False)
x_test = model.get_input(train=False)
model.add(Activation('softmax'))
##model.add(Merge([userModel, itemModel], mode='sum'))


print('done model construction')
model.compile(loss='categorical_crossentropy', optimizer='Adadelta')
print('done compile')
scoring = theano.function(x_test, y_score, allow_input_downcast=True, mode=None)
history = model.fit([user, Items], y_train, nb_epoch=100, batch_size=2048, verbose=2, show_accuracy=True)

#history = model.train_on_batch([user ,Items] ,y_train,accuracy=True)# nb_epoch=10, batch_size=1024, verbose=2, show_accuracy=True)
print('done training')
user_test, Items_test, y_test = load_dataset(r"C:\Users\t-alie\Downloads\movieLens_1M\movielens.userstest100k.centered", r"C:\Users\t-alie\Downloads\movieLens_1M\movielens.itemstest100k", r"C:\Users\t-alie\Downloads\movieLens_1M\movielens.itemstest100k.fakeneg", 50781)
y_p = model.custom_predict([user_test, Items_test], scoring)
#y_pp=model.predict([user_test,Items_test])
Example #11
# build a loss function that maximizes the activation
# of the nth filter of the layer considered
layer_output = layer_dict[layer_name].get_output()
print("YODA Layer-output shape", layer_output.shape)  # symbolic Theano shape, not concrete dims
loss = K.mean(layer_output[:, filter_index])

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch)



input_img_data = [X_train[0, :, :, :]]
sp.misc.imsave('test.jpg', np.squeeze(input_img_data[0]))  # imsave wants an array; squeeze assumes a single-channel image



input_img = model.get_input() 
score = model.evaluate(X_test, Y_test, batch_size=batch_size)
grads = K.gradients(loss,input_img)
iterate = K.function([input_img], [loss, grads])


print("YODA_1")
step = 0.01
for i in range(10):
    loss_value, grads_value = iterate([input_img_data])
    input_img_data += grads_value * step
score = model.predict_stochastic(input_img_data,batch_size=batch_size)
print(score)
print("YODA")
json_string = model.to_json()
open('model_200_arch.json', 'w').write(json_string)
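
The loop above performs gradient ascent on the input image. A self-contained sketch of that pattern with the old keras.backend (Theano) API, using a tiny stand-in model and a hypothetical input shape:

import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
from keras import backend as K

model = Sequential()
model.add(Dense(4, input_dim=4))

input_img = model.get_input()
loss = K.mean(model.get_output()[:, 0])          # maximize activation of unit 0
grads = K.gradients(loss, input_img)
grads /= K.sqrt(K.mean(K.square(grads))) + 1e-5  # normalize for a stable ascent
iterate = K.function([input_img], [loss, grads])

input_img_data = np.random.uniform(-1, 1, (1, 4)).astype('float32')
step = 0.01
for _ in range(10):
    loss_value, grads_value = iterate([input_img_data])
    input_img_data += grads_value * step         # move the input uphill on the loss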
Example #12
class TestOrthoRNN(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestOrthoRNN, self).__init__(*args, **kwargs)
        self.input_dim = 2
        self.state_dim = 2
        self.model = Recursive(return_sequences=True)
        self.model.add_input('input', ndim=3)  # Input is 3D tensor
        self.model.add_state('h', dim=self.state_dim)
        self.model.add_node(Dense(self.input_dim, self.state_dim, init='one'),
                            name='i2h',
                            inputs=['input'])
        self.model.add_node(Dense(self.state_dim, self.state_dim,
                                  init='orthogonal'),
                            name='h2h',
                            inputs=['h'])
        self.model.add_node(Lambda(lambda x: x),
                            name='rec',
                            inputs=['i2h', 'h2h'],
                            merge_mode='sum',
                            return_state='h',
                            create_output=True)

        self.model2 = Sequential()
        self.model2.add(
            SimpleRNN(input_dim=self.input_dim,
                      activation='linear',
                      inner_init='one',
                      output_dim=self.state_dim,
                      init='one',
                      return_sequences=True))
        # copy the orthogonal recurrent matrix into the reference RNN so both recurrences match
        U = self.model.nodes['h2h'].W.get_value()
        self.model2.layers[0].U.set_value(U)

    def test_step(self):
        XX = T.matrix()
        HH = T.matrix()
        A = self.model._step(XX, HH)
        F = function([XX, HH], A, on_unused_input='warn')
        x = np.ones((1, 2))
        h = np.ones((1, 2))
        y = F(x, h)
        assert y[-1].shape == (1, 2)

    def test_get_get_output(self):
        X = self.model.get_input()
        Y = self.model._get_output()
        F = function([X], Y, allow_input_downcast=True)

        x = np.ones((3, 5, self.input_dim))
        y = F(x)
        print(y)

        X2 = self.model2.get_input()
        Y2 = self.model2.get_output()
        F2 = function([X2], Y2)
        y2 = F2(x)

        assert_allclose(y2, y[-1])
Example #13
def diffraction():
    """
    Train and test a simple classification for two groups of diffraction data
    Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 

    Parameters
    ----------
    parameter_01 : type
        Description.

    parameter_02 : type
        Description.

    parameter_03 : type
        Description.

    Returns
    -------
    return_01
        Description.
    """
    np.random.seed(1337)  # for reproducibility

    batch_size = 50
    nb_classes = 2
    nb_epoch = 20
    # input image dimensions
    img_rows, img_cols = 100, 100
    # number of convolutional filters to use
    nb_filters = 32
    # size of pooling area for max pooling
    nb_pool = 2
    # convolution kernel size
    nb_conv = 3

    # the data, shuffled and split between train and test sets
    texture = np.load('textureCrops.npy')
    standard = np.load('standardCrops.npy')


    # shuffle them
    #np.random.shuffle(texture)
    #np.random.shuffle(standard)
    #np.random.shuffle(extra)

    X_train = np.zeros((2000, 100, 100))
    X_train[0:1000] = texture[0:1000]      # was [0:999], which left rows 999/1999 all-zero
    X_train[1000:2000] = standard[0:1000]

    #X_train[100:199] = 0.7

    y_train = np.zeros(2000)
    y_train[1000:2000] = 1

    X_test = np.zeros((2000, 100, 100))
    X_test[0:1000] = texture[2000:3000]
    X_test[1000:2000] = standard[1000:2000]
    print(X_test.shape)
    y_test = np.zeros(2000)
    y_test[1000:2000] = 1
    print(y_test.shape)
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    #X_train = np.log(X_train)
    #X_test = np.log(X_test)
    #X_train = (X_train-X_train.max())/(X_train.min()-X_train.max())
    #X_test = (X_test-X_test.max())/(X_test.min()-X_test.max())
    train_mean = np.mean(X_train)
    train_std = np.std(X_train)
    X_train = (X_train-train_mean)/train_std
    #X_train = abs(X_train/X_train.max())
    test_mean = np.mean(X_test)
    test_std = np.std(X_test)
    X_test = (X_test-test_mean)/test_std
    #X_test = abs(X_test/X_test.max())

    print(X_test.max(), X_test.min())
    #print('adding noise')
    #noise_tmp = abs(np.random.normal(0, 0.02,(400,200,200))).astype('float32')
    #print('creat noise')
    #X_test = np.add(X_test,noise_tmp)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
    #X_train[100:199] = 0.7 
    #X_test[200:299] = 0.6
    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model = Sequential()

    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adadelta')

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    plot(model, to_file='model.png')
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    print('Predicting')
    start = time.clock()
    predicted_output = model.predict(X_test, batch_size=batch_size)
    print('The prediction time for 2000 samples is:', time.clock() - start)
    np.save('labels', Y_test)
    np.save('predicted_output', predicted_output)
    print('Predicted class', predicted_output)
    i = 1
    margin = 5
    n = 15
    # Visualize the first layer of convolutions on an input image
    X = X_test[i:i + 1]
    img_width = X.shape[2]
    img_height = X.shape[3]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters = np.zeros((1, width, height))
    for i in range(n):
        for j in range(n):
            img = X_test[i * n + j, :, :, :]  # was X_test[n], which tiled the same image n*n times
            stitched_filters[:, (img_width + margin) * i:(img_width + margin) * i + img_width,
                             (img_height + margin) * j:(img_height + margin) * j + img_height] = img

    fb = stitched_filters[0]
    imsave('conv.png', fb)



    # Visualize weights
    W = model.layers[0].W.get_value(borrow=True)
    W = np.squeeze(W)
    print("W shape : ", W.shape[0], W.shape[1:])
    n = 6
    img_width = W.shape[1]
    img_height = W.shape[2]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters = np.zeros((1, width, height))
    for i in range(n):
        for j in range(n):
            index = i * n + j
            if index < W.shape[0]:
                img = W[index]  # was W[j], which repeated the first row of filters
                stitched_filters[:, (img_width + margin) * i:(img_width + margin) * i + img_width,
                                 (img_height + margin) * j:(img_height + margin) * j + img_height] = img

    fb = stitched_filters[0]
    imsave('weight.png', fb)





    # Visualize convolution result (after activation)
    # NOTE: convout1 is assumed to be an Activation layer handle captured when the
    # model was built; it is not defined in this excerpt.
    convout1_f = theano.function([model.get_input(train=False)], convout1.get_output(train=False))
    W = convout1_f(X)
    W = np.squeeze(W)
    print("C1 shape : ", W.shape)

    n = 6
    img_width = W.shape[1]
    img_height = W.shape[2]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters = np.zeros((1, width, height))
    for i in range(n):
        for j in range(n):
            index = i * n + j
            if index < W.shape[0]:
                img = W[index]  # was W[j]; see the note on the weight mosaic above
                stitched_filters[:, (img_width + margin) * i:(img_width + margin) * i + img_width,
                                 (img_height + margin) * j:(img_height + margin) * j + img_height] = img
    ff = stitched_filters[0]
    plt.imshow(ff)
    plt.show()
    imsave('conf1.png', ff)

    print('Plotting Results')
    Y_predicted = np.zeros(len(predicted_output))
    for i in range(len(predicted_output)):
        if np.round(predicted_output[i, 0]) == 1:
            Y_predicted[i] = 0
        else:
            Y_predicted[i] = 1

    xxx = range(len(y_test))
    plt.subplot(2, 1, 1)
    plt.scatter(xxx, y_test)  # integer labels (Y_test is one-hot, which scatter can't plot)
    plt.title('Expected')
    plt.ylim((-0.2, 1.2))
    plt.subplot(2, 1, 2)
    plt.scatter(xxx, Y_predicted)
    plt.title('Predicted')
    plt.ylim((-0.2, 1.2))
    plt.show()
Example #14
model.add(Convolution2D(28, 1, 3, 3, border_mode='full')) 
convout1 = Activation('relu')
model.add(convout1)


# Data loading + reshape to 4D
(X_train, y_train), (X_test, y_test) = mnist.load_data()
reshaped = X_train.reshape(X_train.shape[0], 1, X_train.shape[1], X_train.shape[2])


from random import randint
img_to_visualize = randint(0, len(X_train) - 1)


# Generate function to visualize first layer
convout1_f = theano.function([model.get_input(train=False)], convout1.get_output(train=False))
convolutions = convout1_f(reshaped[img_to_visualize: img_to_visualize+1])


# %matplotlib inline  (non-magical version: get_ipython().magic(u'matplotlib inline'))
imshow = plt.imshow  # alias
plt.title("Image used: #%d (digit=%d)" % (img_to_visualize, y_train[img_to_visualize]))
imshow(X_train[img_to_visualize])


plt.title("First convolution:")
imshow(convolutions[0][0])

Example #15
def diffraction():
    """
    Train and test a simple classification for two groups of diffraction data
    Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 

    Parameters
    ----------
    parameter_01 : type
        Description.

    parameter_02 : type
        Description.

    parameter_03 : type
        Description.

    Returns
    -------
    return_01
        Description.
    """
    np.random.seed(1337)  # for reproducibility

    batch_size = 50
    nb_classes = 2
    nb_epoch = 20
    # input image dimensions
    img_rows, img_cols = 100, 100
    # number of convolutional filters to use
    nb_filters = 32
    # size of pooling area for max pooling
    nb_pool = 2
    # convolution kernel size
    nb_conv = 3

    # the data, shuffled and split between train and test sets
    #texture = np.load('textureCrops.npy')
    #standard = np.load('standardCrops.npy')

    texture = np.random.random((3000, 100, 100))
    standard = np.random.random((3000, 100, 100))
    extra = np.random.random((3000, 100, 100))

    # shuffle them
    #np.random.shuffle(texture)
    #np.random.shuffle(standard)
    #np.random.shuffle(extra)

    print('texture.shape', texture.shape)
    print(texture[1])

    X_train = np.zeros((2000, 100, 100))
    X_train[0:1000] = texture[0:1000]  # was [0:999]; see the same fix in Example #13
    X_train[1000:2000] = standard[0:1000]

    #X_train[100:199] = 0.7

    y_train = np.zeros(2000)
    y_train[1000:2000] = 1

    X_test = np.zeros((2000, 100, 100))
    X_test[0:1000] = texture[2000:3000]
    X_test[1000:2000] = standard[1000:2000]
    print(X_test.shape)
    y_test = np.zeros(2000)
    y_test[1000:2000] = 1
    print(y_test.shape)
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train = np.log(X_train)
    X_test = np.log(X_test)
    #X_train = (X_train-X_train.max())/(X_train.min()-X_train.max())
    #X_test = (X_test-X_test.max())/(X_test.min()-X_test.max())
    train_mean = np.mean(X_train)
    train_std = np.std(X_train)
    X_train = (X_train - train_mean) / train_std
    #X_train = abs(X_train/X_train.max())
    test_mean = np.mean(X_test)
    test_std = np.std(X_test)
    X_test = (X_test - test_mean) / test_std
    #X_test = abs(X_test/X_test.max())

    print('X_test.max', X_test.max(), 'X_test.min', X_test.min())
    #print('adding noise')
    #noise_tmp = abs(np.random.normal(0, 0.02,(400,200,200))).astype('float32')
    #print('creat noise')
    #X_test = np.add(X_test,noise_tmp)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
    #X_train[100:199] = 0.7
    #X_test[200:299] = 0.6
    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model = Sequential()

    model.add(
        Convolution2D(nb_filters,
                      nb_conv,
                      nb_conv,
                      border_mode='valid',
                      input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(nb_filters * 2, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adadelta')

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=1,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    plot(model, to_file='model.png')
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    print('Predicting')
    start = time.clock()
    predicted_output = model.predict(X_test, batch_size=batch_size)
    print('The prediction time for 2000 samples is:', time.clock() - start)
    np.save('labels', Y_test)
    np.save('predicted_output', predicted_output)
    print('Predicted class', predicted_output)
    i = 1
    margin = 5
    n = 15
    # Visualize the first layer of convolutions on an input image
    X = X_test[i:i + 1]
    img_width = X.shape[2]
    img_height = X.shape[3]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters = np.zeros((1, width, height))
    for i in range(n):
        for j in range(n):
            img = X_test[i * n + j, :, :, :]  # was X_test[n], which tiled the same image
            stitched_filters[:, (img_width + margin) *
                             i:(img_width + margin) * i + img_width,
                             (img_height + margin) *
                             j:(img_height + margin) * j + img_height] = img

    fb = stitched_filters[0]
    imsave('conv.png', fb)

    # Visualize weights
    W = model.layers[0].W.get_value(borrow=True)
    W = np.squeeze(W)
    print("W shape : ", W.shape[0], W.shape[1:])
    n = 6
    img_width = W.shape[1]
    img_height = W.shape[2]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters = np.zeros((1, width, height))
    for i in range(n):
        for j in range(n):
            index = i * n + j
            if index < W.shape[0]:
                img = W[index]  # was W[j], which repeated the first row of filters
                stitched_filters[:, (img_width + margin) *
                                 i:(img_width + margin) * i + img_width,
                                 (img_height + margin) *
                                 j:(img_height + margin) * j +
                                 img_height] = img

    fb = stitched_filters[0]
    imsave('weight.png', fb)

    # Visualize convolution result (after activation)
    # NOTE: convout1 is assumed to be an Activation layer handle captured when the
    # model was built; it is not defined in this excerpt.
    convout1_f = theano.function([model.get_input(train=False)],
                                 convout1.get_output(train=False))
    W = convout1_f(X)
    W = np.squeeze(W)
    print("C1 shape : ", W.shape)

    n = 6
    img_width = W.shape[1]
    img_height = W.shape[2]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters = np.zeros((1, width, height))
    for i in range(n):
        for j in range(n):
            index = i * n + j
            if index < W.shape[0]:
                img = W[index]  # was W[j]; see the note on the weight mosaic above
                stitched_filters[:, (img_width + margin) *
                                 i:(img_width + margin) * i + img_width,
                                 (img_height + margin) *
                                 j:(img_height + margin) * j +
                                 img_height] = img
    ff = stitched_filters[0]
    plt.imshow(ff)
    plt.show()
    imsave('conf1.png', ff)

    print('Plotting Results')
    Y_predicted = np.zeros(len(predicted_output))
    for i in range(len(predicted_output)):
        if np.round(predicted_output[i, 0]) == 1:
            Y_predicted[i] = 0
        else:
            Y_predicted[i] = 1

    xxx = range(len(y_test))
    plt.subplot(2, 1, 1)
    plt.scatter(xxx, y_test)  # integer labels (Y_test is one-hot, which scatter can't plot)
    plt.title('Expected')
    plt.ylim((-0.2, 1.2))
    plt.subplot(2, 1, 2)
    plt.scatter(xxx, Y_predicted)
    plt.title('Predicted')
    plt.ylim((-0.2, 1.2))
    plt.show()
Example #16
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

model.load_weights(model_fname)

# from keras.utils.visualize_util import plot
# plot(model, to_file='../other/figures/cnn_model.png')
# exit()

convout1_f = theano.function([model.get_input(train=False)], convout1.get_output(train=False))
convout2_f = theano.function([model.get_input(train=False)], convout2.get_output(train=False))

# Convolution layer 1 weights
W = model.layers[0].W.get_value(borrow=True)
W = np.squeeze(W)
print("W shape : ", W.shape)

pl.figure(figsize=(15, 15))
pl.title('conv1 weights')
nice_imshow(pl.gca(), make_mosaic(W, 6, 6), cmap=cmap)
pl.savefig('../other/figures/cnn_weights.png', bbox_inches='tight', dpi=200)
pl.show()
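
`make_mosaic` and `nice_imshow` are helpers that are not part of this excerpt. A plausible sketch of `make_mosaic` (assumed behavior: tile a stack of 2-D filters into an nrows x ncols grid):

import numpy as np

def make_mosaic(imgs, nrows, ncols, border=1):
    # Tile a stack of 2-D images of shape (n, h, w) into one bordered grid.
    n, h, w = imgs.shape
    mosaic = np.ma.masked_all((nrows * h + (nrows - 1) * border,
                               ncols * w + (ncols - 1) * border),
                              dtype=np.float32)
    for i in range(min(n, nrows * ncols)):
        row, col = divmod(i, ncols)
        mosaic[row * (h + border):row * (h + border) + h,
               col * (w + border):col * (w + border) + w] = imgs[i]
    return mosaic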


# Visualize convolution 1 result (after activation)
Example #17
x = create_input(sentence)

# build the model: 2 stacked LSTM
print("Build model...")
model = Sequential()
first_layer = LSTM(512, return_sequences=True, input_shape=(None, len(chars)))
model.add(first_layer)
model.add(Dropout(0.5))
second_layer = LSTM(512, return_sequences=True)
model.add(second_layer)
model.add(Dropout(0.5))
model.add(TimeDistributedDense(len(chars)))
model.add(Activation("softmax"))

print("creating function")
layer_output = theano.function([model.get_input(train=False)], second_layer.get_output(train=False))

W = layer_output(x)[0]
print(W.shape)

dists = []
for i in xrange(W.shape[0]):
    for j in xrange(i + 1, W.shape[0]):
        # m = (W[i] + W[j]) / 2
        # d = sum([cosine(W[k], m) for k in xrange(i, j)])
        d = euclidean(W[i], W[j])
        dists.append((d, i, j))

dists.sort()
for d, i, j in dists[:100]:
    print(sentence, i, j, d)
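
The double loop above computes all pairwise Euclidean distances between timestep activations in O(T^2) Python calls; SciPy's pdist does the same in one vectorized call. A sketch, assuming W has shape (timesteps, features):

import numpy as np
from scipy.spatial.distance import pdist, squareform

W = np.random.rand(10, 512).astype('float32')  # stand-in for layer_output(x)[0]
D = squareform(pdist(W, metric='euclidean'))   # D[i, j] == euclidean(W[i], W[j])
ii, jj = np.triu_indices(len(W), k=1)
order = np.argsort(D[ii, jj])                  # pairs sorted by distance
for k in order[:5]:
    print(ii[k], jj[k], D[ii[k], jj[k]])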
Example #18
vc = BatchNormalization()
model.add(vc)
model.add(Flatten())
#model.add(Dense(nb_classes))
model.add(Activation('softmax'))


model.load_weights("/data/lisatmp4/sarath/data/output/conv/1/weights.hdf5")#/data/lisatmp4/chinna/data/ift6268/temp/1/weights.hdf5")

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)



convout = theano.function([model.get_input(train=False)], vc.get_output(train=False))
t0 = time.clock()
[layer_output] = convout(im)
print(layer_output.shape)


dpath = "/data/lisatmp4/chinna/data/ift6268/temp/1/"

for i in range(0, 10):
    convert_to_image(layer_output[i], dpath + str(i) + "old.jpg")
    layer_output[i] = add_gnoise_util(layer_output[i])
    print(max(layer_output[i].flatten()))
    convert_to_image(layer_output[i], dpath + str(i) + ".jpg")
print("Time")
print(time.clock() - t0)
Example #19
model.add(Convolution2D(32, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))

model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam')
XX = model.get_input()
YY = model.layers[0].get_output()
F = theano.function([XX], YY)
nb_epochs = 10  # you probably want to go longer than this
batch_size = 256
#fig = plt.figure()
try:
    for e in range(nb_epochs):
        print('-' * 40)
        progbar = generic_utils.Progbar(X_train.shape[0])
        for b in range(X_train.shape[0] // batch_size):  # // keeps this an int under Python 3
            f = b * batch_size
            l = (b + 1) * batch_size
            X_batch = X_train[f:l].astype('float32')
            y_batch = y_train[f:l].astype('float32')
            loss = model.train_on_batch(X_batch, y_batch)
            # (snippet truncated: the except/finally for the try above is not shown)
Example #20
x = create_input(sentence)

# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
first_layer = LSTM(512, return_sequences=True, input_shape=(None, len(chars)))
model.add(first_layer)
model.add(Dropout(0.5))
second_layer = LSTM(512, return_sequences=True)
model.add(second_layer)
model.add(Dropout(0.5))
model.add(TimeDistributedDense(len(chars)))
model.add(Activation('softmax'))

print('creating function')
layer_output = theano.function([model.get_input(train=False)], second_layer.get_output(train=False))

W = layer_output(x)[0]
print(W.shape)

dists = []
for i in xrange(W.shape[0]):
    for j in xrange(i+1, W.shape[0]):
        # m = (W[i] + W[j]) / 2
        # d = sum([cosine(W[k], m) for k in xrange(i, j)])
        d = euclidean(W[i], W[j])
        dists.append((d, i, j))

dists.sort()
for d, i, j in dists[:100]:
    print(sentence, i, j, d)
Example #21
userModel = Sequential()
userModel.add(Dense(userfea, 700))
userModel.add(Activation('tanh'))
userModel.add(Dropout(0.4))
userModel.add(Dense(700, 500))
userModel.add(Activation('tanh'))

itemModel = Sequential()
itemModel.add(TimeDistributedDense(itemfea, 1000))
itemModel.add(Activation('tanh'))
itemModel.add(Dropout(0.4))
itemModel.add(TimeDistributedDense(1000, 500))
itemModel.add(Activation('tanh'))
##itemModel.add(Reshape(4))
##itemModel.add(Dense(4, 2))
itm = itemModel.get_input(train=False)
usr = userModel.get_input(train=False)
itemrep = itemModel.get_output(train=False)
userrep = userModel.get_output(train=False)
model = Sequential()
model.add(Cosine([userModel, itemModel]))  #should output 2 values
#model.add(TimeDistributedDense(300, 1))
##model.add(Activation('normalization'))
model.add(Reshape(2))
y_score = model.get_output(train=False)
x_test = model.get_input(train=False)
model.add(Activation('softmax'))
print("Complie model...")
model.compile(loss='categorical_crossentropy', optimizer='adam')
print("Complie outs...")
outv1 = theano.function([usr], userrep, allow_input_downcast=True, mode=None)