def get_layer_output(self,
                     layer,
                     input_=None,
                     modelname='model_norm',
                     main_layer='main_seq',
                     prediction_batch_size=1):
    """Run a forward pass and return the activations of one inner layer.

    Parameters:
        layer: name of the target layer inside ``main_layer``.
        input_: list of input arrays for the model; when ``None``, dummy
            all-ones arrays matching the model's input shapes (with the
            batch axis set to ``prediction_batch_size``) are generated.
        modelname: key into ``self.models`` selecting the model to probe.
        main_layer: name of the nested sequential layer containing ``layer``.
        prediction_batch_size: batch size used for prediction and for the
            generated dummy inputs.

    Returns:
        The numpy array produced by predicting up to the requested layer.

    Raises:
        AttributeError: re-raised (with original traceback) when the
            requested layer has no ``output`` attribute; debug info is
            printed first.
    """
    _layer = self.models[modelname].get_layer(main_layer).get_layer(layer)

    # Compare against None explicitly: `not input_` would also be true
    # for an empty list, and is ambiguous (ValueError) for numpy arrays.
    if input_ is None:
        input_ = [
            np.ones(shape=(prediction_batch_size, ) + i[1:]) for i in
            self.models[modelname].get_layer(main_layer).input_shape
        ]

    if self.layer_formats[layer] > 0:
        # Positive format: layer output is in the cuda-convnet axis order
        # (batch axis last, cf. the 'cuda_out_perm' permutation elsewhere
        # in this class) -- permute the batch axis back to the front and
        # force the requested batch size via a symbolic reshape.
        output_layer = fPermute((3, 0, 1, 2))(_layer.output)
        output_layer = Lambda(
            lambda x: T.reshape(x[0], (prediction_batch_size, ) + tuple(
                T.shape(output_layer)[1:])),
            output_shape=lambda input_shape:
            (prediction_batch_size, ) + input_shape[1:])(output_layer)
    else:
        try:
            output_layer = _layer.output
        except AttributeError:
            print 'debug infos after Attribute error'
            print layer
            print _layer
            # Bare raise keeps the original exception message/traceback
            # instead of raising a fresh, message-less AttributeError.
            raise

    # Temporary model from the main sequence's inputs to the chosen layer.
    intermediate_layer_model = Model(
        inputs=self.models[modelname].get_layer(main_layer).get_input_at(
            0),
        outputs=output_layer)
    return intermediate_layer_model.predict(
        input_, batch_size=prediction_batch_size)
# --- Esempio n. 2 ("Example no. 2") -- snippet boundary left over from the
# --- source aggregator (score line: 0); the code below is a separate snippet.
# Merge the two input branches via the custom `kaggle_input` mode.
# The declared output shape blows the batch up by a factor of
# 4 * N_INPUT_VARIATION and emits PART_SIZE x PART_SIZE crops --
# presumably one entry per rotated/flipped part view; confirm against
# kaggle_input's definition.
model.add(
    Merge(
        [model1, model2],
        mode=kaggle_input,
        output_shape=lambda x: (BATCH_SIZE * 4 * N_INPUT_VARIATION,
                                NUM_INPUT_FEATURES, PART_SIZE, PART_SIZE),
        arguments={
            'part_size': PART_SIZE,
            'n_input_var': N_INPUT_VARIATION,
            'include_flip': False,
            'random_flip': True
        }))

if debug: print model.output_shape

# (1, 2, 3, 0) moves the batch axis (old axis 0) to the last position --
# the ordering expected by the cuda-convnet based layers below.
model.add(fPermute((1, 2, 3, 0)))

if debug: print model.output_shape

# Convolution/pooling stack built from cuda-convnet wrapper layers;
# debug prints trace the shape after each stage.
model.add(kerasCudaConvnetConv2DLayer(n_filters=32, filter_size=6))
if debug: print model.output_shape
model.add(kerasCudaConvnetPooling2DLayer())

if debug: print model.output_shape

model.add(kerasCudaConvnetConv2DLayer(n_filters=64, filter_size=5))
if debug: print model.output_shape
model.add(kerasCudaConvnetPooling2DLayer())

model.add(kerasCudaConvnetConv2DLayer(n_filters=128, filter_size=3))
model.add(
    def init_models(self):
        """Build the three model variants and store them in ``self.models``.

        Constructs a shared sequential core (named 'main_seq') that merges
        two fixed-size inputs, runs them through a cuda-convnet based
        conv/pool stack and maxout dense layers, then wraps the core into
        three Model views: normalised output, normalised output (separate
        instance, presumably for metrics -- see ``_compile_models``), and
        un-normalised output.  Compiles the models and returns the
        ``self.models`` dict.
        """
        print "init model"
        # Two input tensors, one per configured input size; the '_45'
        # suffix suggests a 45-degree rotated variant -- TODO confirm.
        input_tensor = Input(
            batch_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                         self.input_sizes[0][0], self.input_sizes[0][1]),
            dtype='float32',
            name='input_tensor')

        input_tensor_45 = Input(
            batch_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                         self.input_sizes[1][0], self.input_sizes[1][1]),
            dtype='float32',
            name='input_tensor_45')

        # Matching InputLayer instances feeding the Sequential core's
        # Merge layer (Keras-1 style merge of layer instances).
        input_lay_0 = InputLayer(
            batch_input_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                               self.input_sizes[0][0], self.input_sizes[0][1]),
            name='input_lay_seq_0')

        input_lay_1 = InputLayer(
            batch_input_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                               self.input_sizes[1][0], self.input_sizes[1][1]),
            name='input_lay_seq_1')

        model = Sequential(name='main_seq')

        N_INPUT_VARIATION = 2  # depends on the kaggle input settings
        include_flip = self.include_flip

        # Number of augmented views per example: each input variation,
        # optionally doubled by the flipped copies.
        num_views = N_INPUT_VARIATION * (2 if include_flip else 1)

        # Merge both inputs through the custom `kaggle_input` mode; the
        # output_shape lambda expands the batch axis to cover all part
        # views and emits PART_SIZE x PART_SIZE crops.
        model.add(
            Merge(
                [input_lay_0, input_lay_1],
                mode=kaggle_input,
                output_shape=lambda x:
                ((input_lay_0.output_shape[0] + input_lay_1.output_shape[0]
                  ) * 2 * N_INPUT_VARIATION, self.NUM_INPUT_FEATURES, self.
                 PART_SIZE, self.PART_SIZE),
                arguments={
                    'part_size': self.PART_SIZE,
                    'n_input_var': N_INPUT_VARIATION,
                    'include_flip': include_flip,
                    'random_flip': False
                },
                name='input_merge'))

        # needed for the pylearn moduls used by kerasCudaConvnetConv2DLayer and
        # kerasCudaConvnetPooling2DLayer
        # ((1, 2, 3, 0) moves the batch axis to the last position.)
        model.add(fPermute((1, 2, 3, 0), name='input_perm'))

        # Conv/pool stack; filter sizes shrink (6 -> 5 -> 3 -> 3) while
        # filter counts grow (32 -> 64 -> 128 -> 128).
        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=32,
                                        filter_size=6,
                                        untie_biases=True,
                                        name='conv_0'))
        model.add(kerasCudaConvnetPooling2DLayer(name='pool_0'))

        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=64,
                                        filter_size=5,
                                        untie_biases=True,
                                        name='conv_1'))
        model.add(kerasCudaConvnetPooling2DLayer(name='pool_1'))

        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=128,
                                        filter_size=3,
                                        untie_biases=True,
                                        name='conv_2'))
        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=128,
                                        filter_size=3,
                                        weights_std=0.1,
                                        untie_biases=True,
                                        name='conv_3'))

        model.add(kerasCudaConvnetPooling2DLayer(name='pool_2'))

        # Undo the earlier permutation: (3, 0, 1, 2) moves the (old last)
        # batch axis back to the front for the standard Keras layers.
        model.add(fPermute((3, 0, 1, 2), name='cuda_out_perm'))

        # Collapse the augmented views back into the feature dimension:
        # batch shrinks by 4 * N_INPUT_VARIATION while features grow by
        # 4 * num_views (see output_shape lambda).
        model.add(
            Lambda(function=kaggle_MultiRotMergeLayer_output,
                   output_shape=lambda x:
                   (x[0] // 4 // N_INPUT_VARIATION,
                    (x[1] * x[2] * x[3] * 4 * num_views)),
                   arguments={'num_views': num_views},
                   name='conv_out_merge'))

        # Two maxout dense blocks with dropout; weights are explicitly
        # initialised from the current flattened feature width.
        model.add(Dropout(0.5))
        model.add(
            MaxoutDense(output_dim=2048,
                        nb_feature=2,
                        weights=dense_weight_init_values(
                            model.output_shape[-1], 2048, nb_feature=2),
                        name='maxout_0'))

        model.add(Dropout(0.5))
        model.add(
            MaxoutDense(output_dim=2048,
                        nb_feature=2,
                        weights=dense_weight_init_values(
                            model.output_shape[-1], 2048, nb_feature=2),
                        name='maxout_1'))

        # 37-way output head (relu, small random init, constant bias).
        model.add(Dropout(0.5))
        model.add(
            Dense(units=37,
                  activation='relu',
                  kernel_initializer=initializers.RandomNormal(stddev=0.01),
                  bias_initializer=initializers.Constant(value=0.1),
                  name='dense_output'))

        # Apply the shared sequential core to both input tensors.
        model_seq = model([input_tensor, input_tensor_45])

        CATEGORISED = False  # FIXME has to be implemented

        # Two output heads sharing the core: identical except for the
        # 'normalised' flag passed to OptimisedDivGalaxyOutput.
        output_layer_norm = Lambda(function=OptimisedDivGalaxyOutput,
                                   output_shape=lambda x: x,
                                   arguments={
                                       'normalised': True,
                                       'categorised': CATEGORISED
                                   })(model_seq)
        output_layer_noNorm = Lambda(function=OptimisedDivGalaxyOutput,
                                     output_shape=lambda x: x,
                                     arguments={
                                         'normalised': False,
                                         'categorised': CATEGORISED
                                     })(model_seq)

        # 'model_norm' and 'model_norm_metrics' wrap the SAME normalised
        # output graph in separate Model instances -- presumably so they
        # can be compiled differently (e.g. extra metrics); confirm in
        # ``_compile_models``.
        model_norm = Model(inputs=[input_tensor, input_tensor_45],
                           outputs=output_layer_norm,
                           name='full_model_norm')
        model_norm_metrics = Model(inputs=[input_tensor, input_tensor_45],
                                   outputs=output_layer_norm,
                                   name='full_model_metrics')
        model_noNorm = Model(inputs=[input_tensor, input_tensor_45],
                             outputs=output_layer_noNorm,
                             name='full_model_noNorm')

        self.models = {
            'model_norm': model_norm,
            'model_norm_metrics': model_norm_metrics,
            'model_noNorm': model_noNorm
        }

        self._compile_models()

        return self.models