Example #1
def mask_blending_gan_new(offset_generator,
                          mask_generator,
                          discriminator,
                          nb_fake=64,
                          nb_real=32):
    assert len(mask_generator.input_shape) == 2
    assert len(offset_generator.input_shape) == 2

    g = Graph()
    mask_input_dim = mask_generator.input_shape[1]
    z_shape = (nb_fake, offset_generator.input_shape[1] - mask_input_dim)

    g.add_input(GAN.z_name, batch_input_shape=z_shape)

    g.add_node(Dense(32), 'gen_driver_dense_1', input=GAN.z_name)
    g.add_node(BatchNormalization(),
               'gen_driver_bn_1',
               input='gen_driver_dense_1')
    g.add_node(Activation('relu'), 'gen_driver_act_1', input='gen_driver_bn_1')

    g.add_node(Dense(mask_input_dim),
               'gen_driver_dense_2',
               input='gen_driver_act_1')
    g.add_node(BatchNormalization(),
               'gen_driver_bn_2',
               input='gen_driver_dense_2')
    g.add_node(Layer(), 'driver', input='gen_driver_bn_2')
    # g.add_node(ZeroGradient(), 'gen_driver_zero_grad', input='driver')

    g.add_node(mask_generator, 'mask_generator', input='driver')
    g.add_node(offset_generator, 'gen_offset', input=GAN.z_name)
    g.add_node(PyramidBlending(mask_generator,
                               input_pyramid_layers=3,
                               mask_pyramid_layers=2),
               'blending',
               input='gen_offset')
    reg_layer = Layer()
    act = ActivityInBoundsRegularizer(-1, 1)
    act.set_layer(reg_layer)
    reg_layer.regularizers = [act]
    g.add_node(reg_layer, GAN.generator_name, input='blending')

    real_shape = (nb_real, ) + g.nodes[GAN.generator_name].output_shape[1:]
    g.add_input(GAN.real_name, batch_input_shape=real_shape)
    g.add_node(discriminator,
               "discriminator",
               inputs=[GAN.generator_name, "real"],
               concat_axis=0)
    gan_outputs(g,
                fake_for_gen=(0, nb_fake - nb_real),
                fake_for_dis=(nb_fake - nb_real, nb_fake),
                real=(nb_fake, nb_fake + nb_real))
    return g
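
# A minimal NumPy sketch (not part of the example above) of how gan_outputs
# presumably slices the concatenated discriminator batch: with nb_fake=64 and
# nb_real=32, the first 32 fake rows drive the generator loss, the remaining 32
# fakes and the 32 reals drive the discriminator loss. The index ranges mirror
# the gan_outputs call above.
import numpy as np

nb_fake, nb_real = 64, 32
batch = np.arange(nb_fake + nb_real)               # fakes first, then reals (concat_axis=0)

fake_for_gen = batch[0:nb_fake - nb_real]          # rows 0..31
fake_for_dis = batch[nb_fake - nb_real:nb_fake]    # rows 32..63
real = batch[nb_fake:nb_fake + nb_real]            # rows 64..95
assert len(fake_for_gen) == len(fake_for_dis) == len(real) == nb_real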
Example #2
def model(input_shape):
    autoencoder = models.Sequential()
    # Add a noise layer to get a denoising autoencoder. This helps avoid overfitting
    autoencoder.add(Layer(input_shape=input_shape))

    #autoencoder.add(GaussianNoise(sigma=0.3))
    autoencoder.encoding_layers = create_encoding_layers()
    autoencoder.decoding_layers = create_decoding_layers()
    for i, l in enumerate(autoencoder.encoding_layers):
        autoencoder.add(l)
        print(i, l.input_shape, l.output_shape)
    for i, l in enumerate(autoencoder.decoding_layers):
        autoencoder.add(l)
        print(i, l.input_shape, l.output_shape)

    the_conv = (Convolution2D(
        num_classes,
        1,
        1,
        border_mode='valid',
    ))
    autoencoder.add(the_conv)
    print(the_conv.input_shape, the_conv.output_shape)
    autoencoder.add(Reshape(
        (num_classes, data_shape)))  #, input_shape=(num_classes,360,480)))
    autoencoder.add(Permute((2, 1)))
    autoencoder.add(Activation('softmax'))
    #from keras.optimizers import SGD
    #optimizer = SGD(lr=0.01, momentum=0.8, decay=0., nesterov=False)
    return autoencoder
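
# A NumPy sketch (an illustration, not Keras code) of the shape bookkeeping in the
# Convolution2D(1x1) -> Reshape -> Permute -> softmax tail above, assuming
# num_classes=12 and a 360x480 map, so data_shape = 360 * 480.
import numpy as np

num_classes, h, w = 12, 360, 480
scores = np.random.randn(num_classes, h, w).astype(np.float32)  # per-class score maps

flat = scores.reshape(num_classes, h * w)   # Reshape((num_classes, data_shape))
per_pixel = flat.transpose(1, 0)            # Permute((2, 1)) -> (pixels, classes)

e = np.exp(per_pixel - per_pixel.max(axis=1, keepdims=True))
probs = e / e.sum(axis=1, keepdims=True)    # Activation('softmax') over the class axis
assert probs.shape == (h * w, num_classes)
assert np.allclose(probs.sum(axis=1), 1.0)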
Example #3
def get_offset_generator(n=32,
                         batch_input_shape=50,
                         nb_output_channels=1,
                         init=normal(0.02),
                         merge_layer=None):
    def middle_input_shape():
        input_shape = list(front.output_shape)
        if merge_layer is not None:
            input_shape[1] += merge_layer.output_shape[1]
        return tuple(input_shape)

    front = Sequential()
    front.add(
        Dense(8 * n * 4 * 4, batch_input_shape=batch_input_shape, init=init))
    front.add(BatchNormalization())
    front.add(Activation('relu'))
    front.add(Reshape((
        8 * n,
        4,
        4,
    )))

    up(front)  # 8x8
    conv(front, 4 * n, 3, 3)

    up(front)  # 16x16
    conv(front, 2 * n, 3, 3)

    middle = Sequential()
    middle.add(Layer(batch_input_shape=middle_input_shape()))
    up(middle)
    conv(middle, n, 3, 3)
    deconv(front, n, 3, 3)
    conv(middle, n, 3, 3)
    return front, middle
Example #4
 def add_input(self, name, ndim=3, dtype='float'):
     if name in self.namespace:
         raise Exception('Duplicate node identifier: ' + name)
     self.namespace.add(name)
     self.input_order.append(name)
     layer = Layer()  # empty layer
     if dtype == 'float':
         layer.input = ndim_tensor(ndim)
     else:
         if ndim == 2:
             layer.input = T.imatrix()
         else:
             raise Exception('Type "int" can only be used with ndim==2 (Embedding).')
     layer.input.name = name
     self.inputs[name] = layer
     self.input_config.append({'name': name, 'ndim': ndim, 'dtype': dtype})
Example #5
 def join(self, mo, vo):
     name = 'join_{}'.format(self.join_cnt)
     self.join_cnt += 1
     disconn = mo + '_disconn'
     self.g.add_node(Disconnected(), disconn, input=mo)
     self.g.add_node(Layer(), name, inputs=[disconn, vo], concat_axis=1)
     return self.add_output(name)
Example #7
def test_deconvolution2d_variable():
    input_dim = 20
    z_shape = (64, input_dim)
    z = Layer(input_shape=(input_dim, ))
    model = Sequential()
    model.add(z)
    model.add(Dense(8 * 4 * 4))
    model.add(Reshape((
        8,
        4,
        4,
    )))
    model.add(Activation('relu'))
    model.add(
        Deconv2DVariableWeights(z,
                                8,
                                3,
                                3,
                                subsample=(1, 1),
                                border_mode=(1, 1)))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    model.compile('sgd', 'mse')

    img = np.random.uniform(-1, 1, z_shape).astype(np.float32)
    model.predict(img)
Example #8
def dcgan_generator_conv(n=32,
                         input_dim=50,
                         nb_output_channels=1,
                         init=normal(0.02)):
    def conv(nb_filter, h, w):
        model.add(Convolution2D(nb_filter, h, w, border_mode='same',
                                init=init))
        model.add(batch_norm())
        model.add(Activation('relu'))

    def deconv(nb_filter, h, w):
        deconv_layer = Deconvolution2D(nb_filter,
                                       h,
                                       w,
                                       border_mode=(1, 1),
                                       init=init)
        model.add(deconv_layer)

        w = np.random.normal(0, 0.02, deconv_layer.W_shape).astype(np.float32)
        w *= np.random.uniform(0, 1, (1, w.shape[1], 1, 1))
        deconv_layer.W.set_value(w)
        model.add(batch_norm())
        model.add(Activation('relu'))

    def up():
        model.add(UpSampling2D())

    z = Layer(input_shape=(input_dim, ))
    model = Sequential()
    model.add(z)
    model.add(Dense(8 * n * 4 * 4, init=init))
    model.add(batch_norm())
    model.add(Reshape((
        8 * n,
        4,
        4,
    )))
    model.add(Activation('relu'))

    up()  # 8
    conv(4 * n, 3, 3)
    up()  # 16
    conv(2 * n, 3, 3)
    conv(2 * n, 3, 3)
    up()  # 32
    conv(n, 3, 3)
    conv(n, 3, 3)
    up()  # 64
    conv(n, 3, 3)

    model.add(
        Deconvolution2D(nb_output_channels,
                        3,
                        3,
                        border_mode=(1, 1),
                        init=init))
    model.add(InBounds(-1, 1))
    return model
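
# The per-channel weight rescaling inside deconv() above, replayed in plain NumPy.
# W_shape here is a made-up (filters, channels, h, w); the point is that the uniform
# factor broadcasts along one axis, scaling each channel of the normal-initialised
# kernel by a random amount in [0, 1).
import numpy as np

W_shape = (32, 16, 3, 3)
w = np.random.normal(0, 0.02, W_shape).astype(np.float32)
w *= np.random.uniform(0, 1, (1, W_shape[1], 1, 1))  # one factor per channel
assert w.shape == W_shape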
Example #10
def getFilter(img_w=256, img_h=256, dim=3, model='1'):
    filter = models.Sequential()
    filter.add(Layer(input_shape=(img_h, img_w, dim)))
    for l in getEncoder():
        filter.add(l)
    for l in getDecoder():
        filter.add(l)
    filter.add(Reshape((1, img_h*img_w), input_shape=(1, img_h, img_w)))
    filter.add(Permute((2, 1)))
    return filter
Example #11
def segnet():
    autoencoder = models.Sequential()
    # Add a noise layer to get a denoising autoencoder. This helps avoid overfitting
    autoencoder.add(Layer(input_shape=(im_dimension, im_height, im_width)))

    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(64, 3, 3, subsample = (1,1),border_mode='valid'))
    # print (autoencoder.summary())
    autoencoder.add(BatchNormalization())
    autoencoder.add(Activation('relu'))
    autoencoder.add(MaxPooling2D(pool_size=(2, 2)))
    # print (autoencoder.summary())
    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(64, 3, 3, border_mode='valid'))
    autoencoder.add(BatchNormalization())
    autoencoder.add(Activation('relu'))
    autoencoder.add(MaxPooling2D(pool_size=(2, 2)))

    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(128, 3, 3, border_mode='valid'))
    autoencoder.add(BatchNormalization())
    autoencoder.add(Activation('relu'))
    autoencoder.add(MaxPooling2D(pool_size=(2, 2)))


    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(256, 3, 3, border_mode='valid'))
    autoencoder.add(BatchNormalization())
    autoencoder.add(Activation('relu'))

    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(256, 3, 3, border_mode='valid'))
    autoencoder.add(BatchNormalization())

    autoencoder.add(UpSampling2D(size=(2,2)))
    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(128, 3, 3, border_mode='valid'))
    autoencoder.add(BatchNormalization())

    autoencoder.add(UpSampling2D(size=(2,2)))
    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(64, 3, 3, border_mode='valid'))
    autoencoder.add(BatchNormalization())

    autoencoder.add(UpSampling2D(size=(2,2)))
    autoencoder.add(ZeroPadding2D(padding=(1,1)))
    autoencoder.add(Convolution2D(64, 3, 3, border_mode='valid'))
    autoencoder.add(BatchNormalization())
    # autoencoder.add(Layer(input_shape=(im_dimension, im_height,im_width)))
    autoencoder.add(Convolution2D(nb_classes, 1, 1, border_mode='valid',))
    #import ipdb; ipdb.set_trace()
    autoencoder.add(Reshape((nb_classes,data_shape), input_shape=(nb_classes,im_height,im_width)))
    autoencoder.add(Permute((2, 1)))
    autoencoder.add(Activation('softmax'))
    return autoencoder
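
# Spatial-size bookkeeping for the segnet() above, as a standalone sketch:
# ZeroPadding2D((1, 1)) followed by a 3x3 'valid' Convolution2D keeps H x W, each
# 2x2 max-pool halves it, and each 2x2 upsample doubles it. Assuming a 360x480 input:
def pad_conv(hw):  # ZeroPadding2D((1, 1)) + Convolution2D(n, 3, 3, border_mode='valid')
    return (hw[0] + 2 - 2, hw[1] + 2 - 2)

def pool(hw):
    return (hw[0] // 2, hw[1] // 2)

def up(hw):
    return (hw[0] * 2, hw[1] * 2)

hw = (360, 480)
for step in [pad_conv, pool, pad_conv, pool, pad_conv, pool,   # encoder
             pad_conv, pad_conv,                               # bottleneck
             up, pad_conv, up, pad_conv, up, pad_conv]:        # decoder
    hw = step(hw)
print(hw)  # (360, 480): the decoder restores the input resolution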
Example #12
def test_deconvolution2d_with_conv2d_gpu_contiguous():
    input_shape = (64, 1, 8, 8)
    model = Sequential()
    model.add(Layer(batch_input_shape=input_shape))
    model.add(Deconvolution2D(8, 3, 3, subsample=(1, 1), border_mode=(1, 1)))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    model.add(Convolution2D(10, 3, 3, border_mode='same'))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    model.add(Deconvolution2D(1, 3, 3, subsample=(2, 2), border_mode=(1, 1)))
    model.compile('sgd', 'mse')

    img = np.random.sample((64, 1, 16, 16)).astype(np.float32)
    model.predict(img)
Example #13
    def __init__(self):
        data_shape = 360 * 480
        curRosPackage = rospkg.RosPack()
        self.curModulePath = curRosPackage.get_path('hello_real_sense')
        print('creating the classifier model')
        # initialize
        autoEncoderModel = models.Sequential()
        # create the input layer
        autoEncoderModel.add(Layer(input_shape=(3, 360, 480)))
        # create the encoding layer
        autoEncoderModel.encoding_layers = self.create_encoding_layers()
        # create the decoding layer
        autoEncoderModel.decoding_layers = self.create_decoding_layers()

        # add the encoding layers to
        for l in autoEncoderModel.encoding_layers:
            autoEncoderModel.add(l)
        # then add decoding layers
        for l in autoEncoderModel.decoding_layers:
            autoEncoderModel.add(l)

        # set colors for the mask based on the classifier output
        Unlabelled = [0, 0, 0]
        Pavement = [255, 255, 255]

        self.label_colours = np.array([
            Unlabelled, Unlabelled, Unlabelled, Pavement, Unlabelled,
            Unlabelled, Unlabelled, Unlabelled, Unlabelled, Unlabelled,
            Unlabelled, Unlabelled
        ])

        autoEncoderModel.add(Convolution2D(
            12,
            1,
            1,
            border_mode='valid',
        ))
        autoEncoderModel.add(
            Reshape((12, data_shape), input_shape=(12, 360, 480)))
        autoEncoderModel.add(Permute((2, 1)))
        autoEncoderModel.add(Activation('softmax'))

        print('loading weights for classifier')
        # autoEncoderModel.save_weights('model_weight_ep100.hdf5')
        autoEncoderModel.load_weights(self.curModulePath +
                                      '/classifier/seg_net_weights.hdf5')
        self.imgClassifier = autoEncoderModel
        print('done loading weights')
Example #14
def SegNet():
    with tf.device('/gpu:0'):
        segnet_basic = models.Sequential()

        segnet_basic.add(Layer(input_shape=(256, 256, 1)))

        segnet_basic.encoding_layers = create_encoding_layers()
        for l in segnet_basic.encoding_layers:
            segnet_basic.add(l)

        # Note: if this looks weird, it is because each layer is added inside a for loop
        # instead of writing model.add(layer) for every layer.

        segnet_basic.decoding_layers = create_decoding_layers()
        for l in segnet_basic.decoding_layers:
            segnet_basic.add(l)

        segnet_basic.add(Conv2D(1, (1, 1), padding='same'))
        return segnet_basic
Example #15
 def create(self):
     language_model = Sequential()
     language_model.add(Embedding(
         self._config.input_dim,
         self._config.textual_embedding_dim,
         mask_zero=True))
     language_model.add(LSTM(self._config.hidden_state_dim,
                             return_sequences=False))

     visual_model = Sequential()
     if self._config.visual_embedding_dim > 0:
         visual_model.add(Dense(
             self._config.visual_embedding_dim,
             input_shape=(self._config.visual_dim,)))
     else:
         visual_model.add(Layer(input_shape=(self._config.visual_dim,)))
     self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))
     self.add(Dropout(0.5))
     self.add(Dense(self._config.output_dim))
     self.add(Activation('softmax'))
Example #16
def create_res_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_res_blocks=5, depth=3):
    '''Adds a series of residual blocks at each resolution scale, rather than just
    the minimum one.
    '''
    net = Graph()
    net.add_input('x', input_shape=(3, input_rows, input_cols))
    add_conv_block(net, 'in0', 'x', num_res_filters // 4, 9, activation=activation)
    last_name = 'in0'
    # scale down input to max depth with a series of strided convolutions
    for scale_i in range(depth):
        num_scale_filters = num_res_filters - scale_i * 8 # // (2 ** scale_i) # (depth - scale_i - 1))
        scale_name = 'down_{}'.format(scale_i)
        add_conv_block(net, scale_name, last_name, num_scale_filters, 3, subsample=(2, 2), activation=activation)
        last_name = scale_name
    # add a series of residual blocks at each scale, from smallest to largest
    for scale_i in reversed(range(depth)):
        num_scale_filters = num_res_filters - scale_i * 8 # // (2 ** scale_i) # (depth - scale_i - 1))
        last_scale_name = last_name
        for res_i in range(num_res_blocks):
            block_name = 'res_{}_{}'.format(scale_i, res_i)
            add_conv_block(net, block_name + '_b0', last_name, num_res_filters, 3, activation=activation)
            add_conv_block(net, block_name + '_b1', block_name + '_b0', num_res_filters, 1, activation='linear')
            if last_name == last_scale_name:
                # transform residual connection to same number of filters
                add_conv_block(net, block_name + '_res', last_name, num_res_filters, 1, activation='linear')
            else:
                # no transform needed when the last node was part of the current residual block
                net.add_node(Layer(), block_name + '_res', last_name)
            net.add_node(Activation(res_out_activation), block_name, merge_mode='sum', inputs=[block_name + '_b1', block_name + '_res'])
            last_name = block_name
        # theano doesn't seem to support fractionally-strided convolutions at the moment
        up_name = 'up_{}'.format(scale_i)
        net.add_node(UpSampling2D(), up_name, last_name)
        last_name = up_name
        last_scale_name = up_name
    # final output
    add_conv_block(net, 'out', last_name, 3, 9, activation='linear')
    net.add_node(Activation('linear'), 'texture_rgb', 'out', create_output=True)
    return net
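
# A quick plain-Python check of the filter schedule used in the loops above: each
# scale gets num_res_filters - scale_i * 8 filters, so the defaults
# (num_res_filters=128, depth=3) give 128, 120 and 112, unlike the halving schedule
# left in the commented-out '// (2 ** scale_i)' variant.
num_res_filters, depth = 128, 3
print([num_res_filters - scale_i * 8 for scale_i in range(depth)])  # [128, 120, 112]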
Example #17
        ZeroPadding2D(padding=(pad,pad)),
        Convolution2D(128, kernel, kernel, border_mode='valid'),
        BatchNormalization(),

        UpSampling2D(size=(pool_size,pool_size)),
        ZeroPadding2D(padding=(pad,pad)),
        Convolution2D(filter_size, kernel, kernel, border_mode='valid'),
        BatchNormalization(),
    ]




segnet_basic = models.Sequential()

segnet_basic.add(Layer(input_shape=(3, 360, 480)))



segnet_basic.encoding_layers = create_encoding_layers()
for l in segnet_basic.encoding_layers:
    segnet_basic.add(l)

# Note: if this looks weird, it is because each layer is added inside a for loop
# instead of writing model.add(layer) for every layer.

segnet_basic.decoding_layers = create_decoding_layers()
for l in segnet_basic.decoding_layers:
    segnet_basic.add(l)

segnet_basic.add(Convolution2D(12, 1, 1, border_mode='valid',))
Example #18
def getModel():
    # can load model from uploaded folder
    autoencoder = models.Sequential()
    # Add a noise layer to get a denoising autoencoder. This helps avoid overfitting
    autoencoder.add(Layer(input_shape=(3, 360, 480)))
    autoencoder.encoding_layers = create_encoding_layers()
    autoencoder.decoding_layers = create_decoding_layers()
    data_shape = 360*480
    for l in autoencoder.encoding_layers:
        autoencoder.add(l)
    for l in autoencoder.decoding_layers:
        autoencoder.add(l)
    autoencoder.add(Convolution2D(12, 1, 1, border_mode='valid'))
    autoencoder.add(Reshape((12, 360*480), input_shape=(12, 360, 480)))
    autoencoder.add(Permute((2, 1)))
    autoencoder.add(Activation('softmax'))
    autoencoder.compile(loss=categorical_crossentropy, optimizer='adadelta')
    autoencoder.load_weights('model_weight_ep100.hdf5')
    loaded_model = autoencoder

    model = loaded_model  # this model will be a trained classifier model. e.g. LinearSVM
    return model
def visualize(temp, plot=True):
    Sky = [128, 128, 128]
    Building = [128, 0, 0]
    Pole = [192, 192, 128]
    Road_marking = [255, 69, 0]
    Road = [128, 64, 128]
    Pavement = [60, 40, 222]
    Tree = [128, 128, 0]
    SignSymbol = [192, 128, 128]
    Fence = [64, 64, 128]
    Car = [64, 0, 128]
    Pedestrian = [64, 64, 0]
    Bicyclist = [0, 128, 192]
    Unlabelled = [0, 0, 0]
    label_colours = np.array([Sky, Building, Pole, Road, Pavement,
                              Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
    r = temp.copy()
    g = temp.copy()
    b = temp.copy()
    for l in range(0, 11):
        r[temp == l] = label_colours[l, 0]
        g[temp == l] = label_colours[l, 1]
        b[temp == l] = label_colours[l, 2]

    rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
    rgb[:, :, 0] = r
    rgb[:, :, 1] = g
    rgb[:, :, 2] = b
    if plot:
        plt.imshow(rgb)
    else:
        return rgb.astype(int)
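
# A small usage sketch for visualize() above: feed it a fake 360x480 label map and
# get the RGB array back (plot=False skips matplotlib). numpy is assumed to be
# imported as np, as elsewhere in this snippet.
import numpy as np

fake_labels = np.random.randint(0, 12, (360, 480))
rgb = visualize(fake_labels, plot=False)
assert rgb.shape == (360, 480, 3)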
def normalized(rgb):
    # return rgb/255.0
    norm = np.zeros((rgb.shape[0], rgb.shape[1], 3), np.float32)

    b = rgb[:, :, 0]
    g = rgb[:, :, 1]
    r = rgb[:, :, 2]

    norm[:, :, 0] = cv2.equalizeHist(b)
    norm[:, :, 1] = cv2.equalizeHist(g)
    norm[:, :, 2] = cv2.equalizeHist(r)

    return norm
from PIL import Image
import base64,cStringIO
def runModel(data, model):
    Sky = [128, 128, 128]
    Building = [128, 0, 0]
    Pole = [192, 192, 128]
    Road_marking = [255, 69, 0]
    Road = [128, 64, 128]
    Pavement = [60, 40, 222]
    Tree = [128, 128, 0]
    SignSymbol = [192, 128, 128]
    Fence = [64, 64, 128]
    Car = [64, 0, 128]
    Pedestrian = [64, 64, 0]
    Bicyclist = [0, 128, 192]
    Unlabelled = [0, 0, 0]
    label_colours = np.array([Sky, Building, Pole, Road, Pavement,
                              Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
    ogOut = sys.stdout
    sys.stdout = sys.stderr
    lst = data['files']
    for item in lst:
        temp_data = []
        im = cv2.imread(item[0]['file-uri'])
        im = cv2.resize(im, (480, 360))
        image = [np.rollaxis(normalized(im), 2)]
        output = model.predict_proba(np.array(image))
        pred = visualize(np.argmax(output[0], axis=1).reshape((360, 480)), False)
        print(pred)
        # im = Image.fromarray(np.array(pred).astype('uint8'))
        # buf = cStringIO.StringIO()
        # im.save(buf, format='jpeg')
        # im_str = base64.b64encode(buf.getvalue())
    label = {}
    # label['type'] = 'classification'
    # label['value'] = '<img style="width: 100%" src="data:image/png;base64, ' + im_str + '"/>'
    return [label]

data = {'files': [], 'data':''}
dataitem = {'file-uri':'scene1.png'}
data['files'].append([dataitem])
dataitem = {'file-uri' :'scene2.png'}
data['files'].append([dataitem])

model = getModel()
pred = runModel(data,model)
print(pred)
Example #19
def CreateGraph(emb_dim, hops, activation, mlp_unit, mlp_layer, word_vec_dim,
                aspect_dim, img_dim, emb_size, polarity_num):
    # model
    model = Graph()
    model.add_input(name='sentence', input_shape=(emb_size, img_dim))
    model.add_input(name='aspect', input_shape=(aspect_dim, ))

    model.add_node(TimeDistributedDense(emb_dim),
                   name='embA',
                   input='sentence')
    model.add_node(TimeDistributedDense(emb_dim),
                   name='embB',
                   input='sentence')
    model.add_node(Dense(emb_dim), name='embC0', input='aspect')

    for i in range(hops):
        model.add_node(Lambda(transpose,
                              input_shape=(emb_size, emb_dim),
                              output_shape=(emb_dim, emb_size)),
                       name='tmp%i_0' % i,
                       input='embA')
        model.add_node(RepeatVector(emb_size),
                       name='tmp%i_1' % i,
                       input='embC%i' % i)
        model.add_node(Lambda(transpose, output_shape=(emb_dim, emb_size)),
                       name='tmp%i_2' % i,
                       input='tmp%i_1' % i)
        model.add_node(Layer(),
                       merge_mode='mul',
                       name='tmp%i_3' % i,
                       inputs=['tmp%i_0' % i, 'tmp%i_2' % i])
        model.add_node(TimeDistributedMerge(),
                       name='dot_%i' % i,
                       input='tmp%i_3' % i)
        model.add_node(Activation('softmax'),
                       name='weights_%i' % i,
                       input='dot_%i' % i)
        model.add_node(RepeatVector(emb_dim),
                       name='tmp%i_4' % i,
                       input='weights_%i' % i)
        model.add_node(Lambda(transpose, output_shape=(emb_size, emb_dim)),
                       name='tmp%i_5' % i,
                       input='tmp%i_4' % i)
        model.add_node(Layer(),
                       merge_mode='mul',
                       name='tmp%i_6' % i,
                       inputs=['embB', 'tmp%i_5' % i])
        model.add_node(TimeDistributedMerge(),
                       name='output_%i' % i,
                       input='tmp%i_6' % i)
        model.add_node(Layer(),
                       name='embC%i' % (i + 1),
                       merge_mode='sum',
                       inputs=['embC%i' % i, 'output_%i' % i])

    if mlp_layer == 0:
        model.add_node(Dense(word_vec_dim), name='mlp0', input='embC%i' % hops)
        model.add_output(name='output', input='mlp0')
        return model
    else:
        model.add_node(Dense(mlp_unit, activation=activation),
                       name='mlp0',
                       input='embC%i' % hops)

    if mlp_layer > 1:
        for j in range(mlp_layer - 1):
            model.add_node(Dense(mlp_unit, activation=activation),
                           name='mlp' + str(j + 1),
                           input='mlp' + str(j))
    model.add_node(Dense(polarity_num, activation='softmax'),
                   name='out',
                   input='mlp' + str(mlp_layer - 1))
    model.add_output(name='output', input='out')
    return model
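
# One hop of the memory-network attention above, written out in NumPy as a sketch:
# the transpose/RepeatVector/mul/TimeDistributedMerge chain reduces to a dot
# product, a softmax, and a weighted sum. The sizes are made up for illustration.
import numpy as np

emb_size, emb_dim = 10, 8
embA = np.random.randn(emb_size, emb_dim)       # memory keys ('embA')
embB = np.random.randn(emb_size, emb_dim)       # memory values ('embB')
c = np.random.randn(emb_dim)                    # running query ('embC%i')

dot = embA @ c                                  # 'dot_%i': one score per memory slot
weights = np.exp(dot) / np.exp(dot).sum()       # 'weights_%i': Activation('softmax')
output = (embB * weights[:, None]).sum(axis=0)  # 'output_%i': weighted sum of values
c_next = c + output                             # next 'embC': merge_mode='sum'
assert c_next.shape == (emb_dim,)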
Example #20
        Convolution2D(256, (kernel, kernel), border_mode='valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(128, (kernel, kernel), border_mode='valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(filter_size, (kernel, kernel), border_mode='valid'),
        BatchNormalization(),
    ]


segnet_basic = models.Sequential()

segnet_basic.add(Layer(input_shape=(352, 480, 3)))

segnet_basic.encoding_layers = create_encoding_layers()
for l in segnet_basic.encoding_layers:
    segnet_basic.add(l)

# Note: if this looks weird, it is because each layer is added inside a for loop
# instead of writing model.add(layer) for every layer.

segnet_basic.decoding_layers = create_decoding_layers()
for l in segnet_basic.decoding_layers:
    segnet_basic.add(l)

segnet_basic.add(Convolution2D(
    20,
    1,
    1,
    border_mode='valid',
))
Example #21
    BatchNormalization(),
    Activation('relu'),
    Convolution2D(64, (kernel, kernel), padding='same'),
    BatchNormalization(),
    Activation('relu'),
    UpSampling2D(size=(pool_size, pool_size)),
    Convolution2D(64, (kernel, kernel), padding='same'),
    BatchNormalization(),
    Activation('relu'),
    Convolution2D(n_labels, (1, 1), padding='valid'),
    BatchNormalization(),
]

segnet_basic = models.Sequential()

segnet_basic.add(Layer(input_shape=(img_h, img_w, 3)))

segnet_basic.encoding_layers = encoding_layers
for l in segnet_basic.encoding_layers:
    segnet_basic.add(l)
    print(l.input_shape, l.output_shape, l)

segnet_basic.decoding_layers = decoding_layers
for l in segnet_basic.decoding_layers:
    segnet_basic.add(l)
    print(l.input_shape, l.output_shape, l)

segnet_basic.add(
    Reshape((img_h * img_w, n_labels), input_shape=(img_h, img_w, 2)))
#segnet_basic.add(Permute((2, 1)))
segnet_basic.add(Activation('softmax'))
Example #22
def dcgan_small_generator(nb_units=64,
                          input_dim=20,
                          init=normal(0.02),
                          dense_factor=2,
                          nb_dense_layers=2,
                          nb_output_channels=1,
                          filter_size=3,
                          deconv_layers=False,
                          output_size=32):
    n = nb_units
    f = filter_size

    def deconv_up(nb_filter, h, w):
        return Deconvolution2D(nb_filter,
                               h,
                               w,
                               subsample=(2, 2),
                               border_mode=(h // 2, w // 2),
                               init=init)

    def deconv(nb_filter, h, w):
        return Deconvolution2D(nb_filter,
                               h,
                               w,
                               subsample=(1, 1),
                               border_mode=(h // 2, w // 2),
                               init=init)

    model = Sequential()
    model.add(Layer(input_shape=(input_dim, )))
    for _ in range(nb_dense_layers):
        model.add(
            Dense(dense_factor * nb_units,
                  input_dim=input_dim,
                  activation='relu'))

    model.add(Dense(8 * n * 4 * 4, input_dim=input_dim))
    model.add(Activation('relu'))
    model.add(Reshape((
        8 * n,
        4,
        4,
    )))

    if deconv_layers:
        model.add(deconv(8 * n, f, f))
        model.add(Activation('relu'))

    model.add(deconv_up(4 * n, f, f))
    model.add(Activation('relu'))

    if deconv_layers:
        model.add(deconv(4 * n, f, f))
        model.add(Activation('relu'))

    if output_size >= 16:
        model.add(deconv_up(2 * n, f, f))
        model.add(Activation('relu'))

        if deconv_layers:
            model.add(deconv(2 * n, f, f))
            model.add(Activation('relu'))

    if output_size == 32:
        model.add(deconv_up(n, f, f))
        model.add(Activation('relu'))

    if deconv_layers:
        model.add(deconv(n, f, f))
        model.add(Activation('relu'))

    model.add(
        Deconvolution2D(nb_output_channels,
                        f,
                        f,
                        border_mode=(1, 1),
                        init=init))

    model.add(Activation('linear'))
    return model
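
# The output_size gating above in plain Python: the generator starts from a 4x4 map,
# the first deconv_up always doubles it to 8x8, and the two guarded deconv_up calls
# reach 16x16 and 32x32 only when output_size asks for them.
def final_size(output_size=32):
    size = 4
    size *= 2                  # first deconv_up: 4 -> 8
    if output_size >= 16:
        size *= 2              # 8 -> 16
    if output_size == 32:
        size *= 2              # 16 -> 32
    return size

assert final_size(8) == 8 and final_size(16) == 16 and final_size(32) == 32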
Example #23
def segnet(nClasses, optimizer=None, input_height=360, input_width=480):

    kernel = 3  # filter (= kernel) size
    filter_size = 64
    pad = 1  # padding size
    pool_size = 2  # pooling size

    model = models.Sequential()  # layers will be stacked one after another
    model.add(Layer(input_shape=(3, input_height,
                                 input_width)))  # add(): stacks a layer

    # encoder
    model.add(ZeroPadding2D(padding=(pad, pad)))  # zero-pad by (1, 1)
    model.add(Convolution2D(
        filter_size, kernel, kernel,
        border_mode='valid'))  # convolution layer (kernel passed twice: height and width)
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    # repeat zero-padding, convolution, batch norm, ReLU, max-pooling
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    # repeat zero-padding, convolution, batch norm, ReLU, max-pooling
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
    # repeat zero-padding, convolution, batch norm, ReLU, max-pooling
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # the last encoder stage needs no max-pooling

    # decoder
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    # as in the downsampling path, upsampling is repeated here
    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(Convolution2D(
        nClasses,
        1,
        1,
        border_mode='valid',
    ))

    model.outputHeight = model.output_shape[-2]
    model.outputWidth = model.output_shape[-1]

    model.add(
        Reshape((nClasses, model.output_shape[-2] * model.output_shape[-1]),
                input_shape=(nClasses, model.output_shape[-2],
                             model.output_shape[-1])))

    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
Example #24
def segnet(nClasses, optimizer=None, input_height=1248, input_width=1248):

    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    model = Sequential()
    model.add(Layer(input_shape=(input_height , input_width,1 )))

    # encoder
    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))


    # decoder
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())

    model.add( UpSampling2D(size=(pool_size,pool_size)))
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())

    model.add( UpSampling2D(size=(pool_size,pool_size)))
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())

    model.add( UpSampling2D(size=(pool_size,pool_size)))
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())


    model.add(Convolution2D( nClasses , 1, 1, border_mode='valid',))
    print(model.output_shape)
    model.outputHeight = model.output_shape[1]
    model.outputWidth = model.output_shape[2]
    print(model.outputHeight)
    print(model.outputWidth)
        
    model.add(Reshape(( nClasses ,  model.output_shape[1]*model.output_shape[2]   ), input_shape=( nClasses , model.output_shape[1], model.output_shape[2]  )))
    
    model.add(Permute((2, 1)))

    model.add(Activation('softmax'))
        
    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])
    model.summary()
    return model
Example #25
def new():
    model = models.Sequential()
    model.add(Layer(input_shape=(im_dimension, im_height, im_width)))
    return model
Example #26
def make_model(args, style_img=None):
    model = Graph()
    model.add_input('content',
                    batch_input_shape=(args.batch_size, 3, args.max_height,
                                       args.max_width))
    try:  # if it's a standard activation then just keep the string
        activations.get(args.activation)
        activation = args.activation
    except:  # otherwise we need to look up the class in advanced activations (e.g. LeakyReLU)
        activation = getattr(advanced_activations, args.activation,
                             'activation function')
    texnet = create_res_texture_net(args.max_height,
                                    args.max_width,
                                    activation=activation,
                                    num_res_filters=args.num_res_filters,
                                    num_res_blocks=args.num_blocks,
                                    depth=args.depth)
    # add the texture net to the model
    model.add_node(texnet, 'texnet', 'content')
    model.add_output('texture_rgb', 'texnet')
    # hook up the training network stuff
    if args.train:
        model.add_node(Layer(),
                       'vgg_concat',
                       inputs=['texnet', 'content'],
                       concat_axis=0)
        # add VGG and the constraints
        keras_vgg_buddy.add_vgg_to_graph(model,
                                         'vgg_concat',
                                         pool_mode=args.pool_mode,
                                         trainable=False,
                                         weights_path=args.vgg_weights)
        # add the regularizers for the various feature layers
        vgg = keras_vgg_buddy.VGG16(args.max_height,
                                    args.max_width,
                                    pool_mode=args.pool_mode,
                                    weights_path=args.vgg_weights)
        print('computing static features')
        feature_layers = set()
        if args.style_weight:
            feature_layers.update(args.style_layers)
        if args.content_weight:
            feature_layers.update(args.content_layers)
        if args.mrf_weight:
            feature_layers.update(args.mrf_layers)
        if args.analogy_weight:
            feature_layers.update(args.analogy_layers)
        style_features = vgg.get_features(np.expand_dims(style_img, 0),
                                          feature_layers)
        regularizers = []
        if args.style_weight != 0.0:
            for layer_name in args.style_layers:
                layer = model.nodes[layer_name]
                style_regularizer = FeatureStyleRegularizer(
                    target=style_features[layer_name],
                    weight=args.style_weight / len(args.style_layers))
                style_regularizer.set_layer(layer)
                regularizers.append(style_regularizer)
        if args.content_weight != 0.0:
            for layer_name in args.content_layers:
                layer = model.nodes[layer_name]
                content_regularizer = FeatureContentRegularizer(
                    weight=args.content_weight / len(args.content_layers))
                content_regularizer.set_layer(layer)
                regularizers.append(content_regularizer)
        if args.mrf_weight != 0.0:
            for layer_name in args.mrf_layers:
                layer = model.nodes[layer_name]
                mrf_regularizer = MRFRegularizer(
                    K.variable(style_features[layer_name]),
                    weight=args.mrf_weight / len(args.mrf_layers))
                mrf_regularizer.set_layer(layer)
                regularizers.append(mrf_regularizer)
        if args.analogy_weight != 0.0:
            style_map_img = keras_vgg_buddy.load_and_preprocess_image(
                args.style_map_image_path, width=args.max_width, square=True)
            style_map_features = vgg.get_features(
                np.expand_dims(style_map_img, 0), args.analogy_layers)
            for layer_name in args.analogy_layers:
                layer = model.nodes[layer_name]
                analogy_regularizer = AnalogyRegularizer(
                    style_map_features[layer_name],
                    style_features[layer_name],
                    weight=args.analogy_weight / len(args.analogy_layers))
                analogy_regularizer.set_layer(layer)
                regularizers.append(analogy_regularizer)
        if args.tv_weight != 0.0:
            tv_regularizer = TVRegularizer(weight=args.tv_weight)
            tv_regularizer.set_layer(model.nodes['texnet'])
            regularizers.append(tv_regularizer)
        setattr(model.nodes['vgg_concat'], 'regularizers',
                regularizers)  # Gotta put em somewhere?

        print('compiling')
        start_compile = time.time()
        adam = Adam(lr=args.learn_rate, beta_1=0.7)
        model.compile(optimizer=adam, loss=dict(texture_rgb=dumb_objective))
        print('Compiled model in {:.2f}'.format(time.time() - start_compile))
    return model
Example #27
        UpSampling2D(size=(pool_size, pool_size)),
        # ZeroPadding2D(padding=(pad,pad)),
        # Conv2D(128, (kernel, kernel), padding='valid'),
        Conv2D(filter_size_dec_3, (kernel, kernel), padding='same'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        # ZeroPadding2D(padding=(pad,pad)),
        # Conv2D(filter_size, (kernel, kernel), padding='valid'),
        Conv2D(filter_size_dec_4, (kernel, kernel), padding='same'),
        BatchNormalization(),
    ]


segnet_basic = models.Sequential()

segnet_basic.add(Layer(input_shape=input_shape))

segnet_basic.encoding_layers = create_encoding_layers()
for l in segnet_basic.encoding_layers:
    segnet_basic.add(l)

# Note: if this looks weird, it is because each layer is added inside a for loop
# instead of writing model.add(layer) for every layer.

segnet_basic.decoding_layers = create_decoding_layers()
for l in segnet_basic.decoding_layers:
    segnet_basic.add(l)

segnet_basic.add(Conv2D(
    segment_count,
    (1, 1)))
Example #28
def CreateGraph(emb_dim, hops, activation, mlp_unit, mlp_layer, word_vec_dim,
                img_dim, emb_size, dropout):
    # model
    model = Graph()
    model.add_input(name='image', input_shape=(emb_size, img_dim))
    model.add_input(name='question', input_shape=(30, word_vec_dim))
    model.add_node(LSTM(output_dim=word_vec_dim,
                        return_sequences=False,
                        input_shape=(30, word_vec_dim)),
                   name='query',
                   input='question')

    model.add_node(TimeDistributedDense(emb_dim), name='embA', input='image')
    model.add_node(TimeDistributedDense(emb_dim), name='embB', input='image')
    model.add_node(Dense(emb_dim), name='embC0', input='query')

    for i in range(hops):
        model.add_node(Lambda(transpose,
                              input_shape=(emb_size, emb_dim),
                              output_shape=(emb_dim, emb_size)),
                       name='tmp%i_0' % i,
                       input='embA')
        model.add_node(RepeatVector(emb_size),
                       name='tmp%i_1' % i,
                       input='embC%i' % i)
        model.add_node(Lambda(transpose, output_shape=(emb_dim, emb_size)),
                       name='tmp%i_2' % i,
                       input='tmp%i_1' % i)
        model.add_node(Layer(),
                       merge_mode='mul',
                       name='tmp%i_3' % i,
                       inputs=['tmp%i_0' % i, 'tmp%i_2' % i])
        model.add_node(TimeDistributedMerge(),
                       name='dot_%i' % i,
                       input='tmp%i_3' % i)
        model.add_node(Activation('softmax'),
                       name='weights_%i' % i,
                       input='dot_%i' % i)
        model.add_node(RepeatVector(emb_dim),
                       name='tmp%i_4' % i,
                       input='weights_%i' % i)
        model.add_node(Lambda(transpose, output_shape=(emb_size, emb_dim)),
                       name='tmp%i_5' % i,
                       input='tmp%i_4' % i)
        model.add_node(Layer(),
                       merge_mode='mul',
                       name='tmp%i_6' % i,
                       inputs=['embB', 'tmp%i_5' % i])
        model.add_node(TimeDistributedMerge(),
                       name='output_%i' % i,
                       input='tmp%i_6' % i)
        model.add_node(Layer(),
                       name='embC%i' % (i + 1),
                       merge_mode='sum',
                       inputs=['embC%i' % i, 'output_%i' % i])

    if mlp_layer == 0:
        model.add_node(Dense(word_vec_dim), name='mlp0', input='embC%i' % hops)
        model.add_output(name='output', input='mlp0')
        return model
    else:
        model.add_node(Dense(mlp_unit, activation=activation),
                       name='mlp0',
                       input='embC%i' % hops)
        model.add_node(Dropout(dropout), name='dropout0', input='mlp0')
    if mlp_layer > 1:
        for j in range(mlp_layer - 1):
            model.add_node(Dense(mlp_unit, activation=activation),
                           name='mlp%i' % (j + 1),
                           input='dropout%i' % j)
            model.add_node(Dropout(dropout),
                           name='dropout%i' % (j + 1),
                           input='mlp%i' % (j + 1))
    model.add_node(Dense(word_vec_dim),
                   name='out',
                   input='dropout%i' % (mlp_layer - 1))
    model.add_output(name='output', input='out')
    return model
Example #29
        Convolution2D(256, kernel, kernel, border_mode='valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(128, kernel, kernel, border_mode='valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(filter_size, kernel, kernel, border_mode='valid'),
        BatchNormalization(),
    ]


autoencoder = models.Sequential()
# Add a noise layer to get a denoising autoencoder. This helps avoid overfitting
autoencoder.add(Layer(input_shape=(3, 360, 480)))

#autoencoder.add(GaussianNoise(sigma=0.3))
autoencoder.encoding_layers = create_encoding_layers()
autoencoder.decoding_layers = create_decoding_layers()
for i, l in enumerate(autoencoder.encoding_layers):
    autoencoder.add(l)
    print(i, l.input_shape, l.output_shape)
for i, l in enumerate(autoencoder.decoding_layers):
    autoencoder.add(l)
    print(i, l.input_shape, l.output_shape)

the_conv = (Convolution2D(
    num_classes,
    1,
    1,
    border_mode='valid',
))
Example #30
 def __init__(self, **kwargs):
     Layer.__init__(self, **kwargs)
Example #31
def CreateGraph(emb_dim, hops, batch_size, activation, mlp_unit, mlp_layer,
                word_vec_dim, img_dim, emb_size):
    # model
    model = Graph()
    model.add_input(name='image', input_shape=(emb_size, img_dim))
    model.add_input(name='word', input_shape=(word_vec_dim, ))

    tdd_a = TimeDistributedDense(emb_dim)
    model.add_node(tdd_a, name='embA', input='image')
    tdd_b = TimeDistributedDense(emb_dim)
    model.add_node(tdd_b, name='embB', input='image')
    query = Dense(emb_dim)
    model.add_node(query, name='embC0', input='word')

    dotlayer = LambdaMerge([tdd_a, query],
                           inner_product,
                           output_shape=(emb_size, ))
    model.add_node(LambdaMerge([tdd_b, dotlayer],
                               weighted_sum,
                               output_shape=(emb_dim, )),
                   name='output0')
    model.add_node(Layer(),
                   name='embC1',
                   merge_mode='sum',
                   inputs=['embC0', 'output0'])
    '''
    for i in range(hops):
        str_emb = 'embC' + str(i)
        str_e = 'embC' + str(i+1)
        str_o = 'output' + str(i)
        str_dot = 'dotLayer' + str(i)

        model.add_node(
                LambdaMerge([Layer(),Layer()], inner_product, output_shape=(emb_size,)),
                name=str_dot,
                inputs=[str_emb, 'embA']
                )
        model.add_node(
                Activation('softmax'),
                name='softmax_'+str(i)
                )
        model.add_node(
                LambdaMerge([Layer(),Layer()], weighted_sum, output_shape=(emb_dim,)),
                name=str_o,
                inputs=['embB', 'softmax_'+str(i)]
        )
        model.add_node(
                Layer(),
                name=str_e,
                merge_mode='sum',
                inputs=[str_emb, str_o]
                )
        #model.add_node(Activation('softmax') ,name=str_dot ,inputs=[str_emb, 'embA'],  merge_mode='dot', dot_axes=)

        # model.add_node(RepeatVector(img_dim) ,name='mid'+str(i) ,input=str_dot)
        # model.add_node(Reshape(input_shape=(img_feature_num,img_dim), dims=(img_dim,img_feature_num)), name='mid2'+str(i), input='embB')
        # model.add_node(TimeDistributedMerge(), name=str_o, input=['mid2'+str(i), 'mid'+str(i)], merge_mode='mul')

        # model.add_node(Merge([str_emb, str_out], mode='sum') ,name= str_e ,inputs=[str_emb, str_o])
    '''

    if mlp_layer == 1:
        model.add_node(Dense(word_vec_dim),
                       name='mlp0',
                       input='embC' + str(hops))
        model.add_output(name='output', input='mlp0')
        return model
    else:
        model.add_node(Dense(mlp_unit, activation=activation),
                       name='mlp0',
                       input='embC' + str(hops))

    if mlp_layer > 2:
        for j in range(mlp_layer - 2):
            model.add_node(Dense(mlp_unit, activation=activation),
                           name='mlp' + str(j + 1),
                           input='mlp' + str(j))
    model.add_node(Dense(word_vec_dim),
                   name='out',
                   input='mlp' + str(mlp_layer - 2))
    model.add_output(name='output', input='out')
    return model
Example #33
    Activation('relu'),
    Convolution2D(64, kernel, kernel, border_mode='same'),
    BatchNormalization(),
    Activation('relu'),

    UpSampling2D(size=(pool_size,pool_size)),
    Convolution2D(64, kernel, kernel, border_mode='same'),
    BatchNormalization(),
    Activation('relu'),
    Convolution2D(n_labels, 1, 1, border_mode='valid'),
    BatchNormalization(),
]


segnet_basic = models.Sequential()
segnet_basic.add(Layer(input_shape=(128,128,7)))


segnet_basic.encoding_layers = encoding_layers
for l in segnet_basic.encoding_layers:
    segnet_basic.add(l)


segnet_basic.decoding_layers = decoding_layers
for l in segnet_basic.decoding_layers:
    segnet_basic.add(l)
    
    
segnet_basic.add(Activation('softmax'))

print(segnet_basic.summary())