Example #1
if debug: print model.output_shape

model.add(
    Lambda(function=kaggle_MultiRotMergeLayer_output,
           output_shape=lambda x:
           (x[0] // 4 // N_INPUT_VARIATION,
            (x[1] * x[2] * x[3] * 4 * N_INPUT_VARIATION)),
           arguments={
               'num_views': N_INPUT_VARIATION,
               'mb_size': 16
           }))

if debug: print model.output_shape

#model.add(Dense(output_dim=4096, init=functools.partial(initializations.normal, scale=0.001) ))
model.add(Dense(output_dim=4096, weights=dense_weight_init_values(4096, 4096)))
model.add(Dropout(0.5))
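# Reshape -> MaxPooling1D -> Reshape takes a pairwise max over the 4096 dense
# activations (MaxPooling1D defaults to pool length 2), i.e. a maxout-style
# nonlinearity that halves the features to 2048.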
model.add(Reshape((4096, 1)))
model.add(MaxPooling1D())
model.add(Reshape((2048, )))
model.add(Dense(output_dim=4096, weights=dense_weight_init_values(2048, 4096)))

if debug: print model.output_shape

model.add(Dropout(0.5))
model.add(Reshape((4096, 1)))
model.add(MaxPooling1D())

if debug: print model.output_shape

model.add(Reshape((2048, )))
if debug: print model.output_shape

model.add(
    Lambda(function=kaggle_MultiRotMergeLayer_output,
           output_shape=lambda x:
           (x[0] // 4 // N_INPUT_VARIATION,
            (x[1] * x[2] * x[3] * 4 * N_INPUT_VARIATION)),
           arguments={'num_views': N_INPUT_VARIATION}))

if debug: print model.output_shape

#model.add(Dense(output_dim=4096, init=functools.partial(initializations.normal, scale=0.001) ))
model.add(Dropout(0.5))
model.add(
    Dense(output_dim=4096,
          weights=dense_weight_init_values(model.output_shape[-1], 4096)))
#model.add(Dropout(0.5))
model.add(Reshape((model.output_shape[-1], 1)))
model.add(MaxPooling1D())
model.add(Reshape((model.output_shape[1], )))
model.add(Dropout(0.5))
model.add(
    Dense(output_dim=4096,
          weights=dense_weight_init_values(model.output_shape[-1], 4096)))

if debug: print model.output_shape

#model.add(Dropout(0.5))
model.add(Reshape((model.output_shape[-1], 1)))
model.add(MaxPooling1D())
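
The dense_weight_init_values helper used throughout these examples is not
defined in the excerpts. Below is a minimal sketch of what it plausibly
returns, assuming it builds the [W, b] list that Keras 1.x layers accept
through their weights argument; the signature is inferred from the calls
above, and the shapes for the nb_feature case follow MaxoutDense's weight
layout.

import numpy as np

def dense_weight_init_values(n_in, n_out, nb_feature=None,
                             w_std=0.01, b_init_val=0.1):
    # plain Dense: W is (n_in, n_out), b is (n_out,)
    if nb_feature is None:
        W = np.random.normal(scale=w_std, size=(n_in, n_out))
        b = np.full((n_out,), b_init_val)
    # MaxoutDense: one weight matrix and one bias per linear piece
    else:
        W = np.random.normal(scale=w_std, size=(nb_feature, n_in, n_out))
        b = np.full((nb_feature, n_out), b_init_val)
    return [W.astype('float32'), b.astype('float32')]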
Example #2

    def init_models_ellipse(self, input_shape=9):
        print "init model"
        input_tensor = Input(batch_shape=(self.BATCH_SIZE, input_shape),
                             dtype='float32',
                             name='input_tensor')

        input_lay_0 = InputLayer(batch_input_shape=(self.BATCH_SIZE,
                                                    input_shape),
                                 name='input_lay_seq_0')

        model = Sequential(name='main_seq')

        model.add(Dropout(0.5, input_shape=(input_shape, )))
        model.add(
            MaxoutDense(output_dim=2048,
                        nb_feature=2,
                        weights=dense_weight_init_values(
                            model.output_shape[-1], 2048, nb_feature=2),
                        name='maxout_0'))

        model.add(Dropout(0.5))
        model.add(
            MaxoutDense(output_dim=2048,
                        nb_feature=2,
                        weights=dense_weight_init_values(
                            model.output_shape[-1], 2048, nb_feature=2),
                        name='maxout_1'))

        model.add(Dropout(0.5))
        model.add(
            Dense(units=37,
                  activation='relu',
                  kernel_initializer=initializers.RandomNormal(stddev=0.01),
                  bias_initializer=initializers.Constant(value=0.1),
                  name='dense_output'))

        model_seq = model([input_tensor])

        CATEGORISED = False  # FIXME has to be implemented

        output_layer_norm = Lambda(function=OptimisedDivGalaxyOutput,
                                   output_shape=lambda x: x,
                                   arguments={
                                       'normalised': True,
                                       'categorised': CATEGORISED
                                   })(model_seq)
        output_layer_noNorm = Lambda(function=OptimisedDivGalaxyOutput,
                                     output_shape=lambda x: x,
                                     arguments={
                                         'normalised': False,
                                         'categorised': CATEGORISED
                                     })(model_seq)

        model_norm = Model(inputs=[input_tensor],
                           outputs=output_layer_norm,
                           name='full_model_norm_ellipse')
        model_norm_metrics = Model(inputs=[input_tensor],
                                   outputs=output_layer_norm,
                                   name='full_model_metrics_ellipse')
        model_noNorm = Model(inputs=[input_tensor],
                             outputs=output_layer_noNorm,
                             name='full_model_noNorm_ellipse')

        self.models = {
            'model_norm_ellipse': model_norm,
            'model_norm_metrics_ellipse': model_norm_metrics,
            'model_noNorm_ellipse': model_noNorm
        }

        self._compile_models(postfix='_ellipse')

        return self.models
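
OptimisedDivGalaxyOutput is likewise only referenced here. A hedged stand-in
that matches the call signature, under the assumption that it renormalises
the 37 Galaxy Zoo answer probabilities when normalised=True; the real
per-question normalisation scheme lives in the surrounding repository.

import keras.backend as K

def OptimisedDivGalaxyOutput(x, normalised=True, categorised=False):
    # categorised output is marked FIXME in these examples; not handled here
    if not normalised:
        return x
    # crude global renormalisation; a stand-in, not the original scheme
    return x / (K.sum(x, axis=-1, keepdims=True) + K.epsilon())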
Example #3

    def init_models(self):
        print "init model"
        input_tensor = Input(
            batch_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                         self.input_sizes[0][0], self.input_sizes[0][1]),
            dtype='float32',
            name='input_tensor')

        input_tensor_45 = Input(
            batch_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                         self.input_sizes[1][0], self.input_sizes[1][1]),
            dtype='float32',
            name='input_tensor_45')

        input_lay_0 = InputLayer(
            batch_input_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                               self.input_sizes[0][0], self.input_sizes[0][1]),
            name='input_lay_seq_0')

        input_lay_1 = InputLayer(
            batch_input_shape=(self.BATCH_SIZE, self.NUM_INPUT_FEATURES,
                               self.input_sizes[1][0], self.input_sizes[1][1]),
            name='input_lay_seq_1')

        model = Sequential(name='main_seq')

        N_INPUT_VARIATION = 2  # depends on the kaggle input settings
        include_flip = self.include_flip

        num_views = N_INPUT_VARIATION * (2 if include_flip else 1)

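        # kaggle_input crops both 45-degree-rotated inputs into
        # PART_SIZE x PART_SIZE views and stacks the variations along the
        # batch axis, as the output_shape lambda below reflects.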
        model.add(
            Merge(
                [input_lay_0, input_lay_1],
                mode=kaggle_input,
                output_shape=lambda x:
                ((input_lay_0.output_shape[0] + input_lay_1.output_shape[0])
                 * 2 * N_INPUT_VARIATION, self.NUM_INPUT_FEATURES,
                 self.PART_SIZE, self.PART_SIZE),
                arguments={
                    'part_size': self.PART_SIZE,
                    'n_input_var': N_INPUT_VARIATION,
                    'include_flip': include_flip,
                    'random_flip': False
                },
                name='input_merge'))

        # needed for the pylearn modules used by kerasCudaConvnetConv2DLayer
        # and kerasCudaConvnetPooling2DLayer
        model.add(fPermute((1, 2, 3, 0), name='input_perm'))

        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=32,
                                        filter_size=6,
                                        untie_biases=True,
                                        name='conv_0'))
        model.add(kerasCudaConvnetPooling2DLayer(name='pool_0'))

        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=64,
                                        filter_size=5,
                                        untie_biases=True,
                                        name='conv_1'))
        model.add(kerasCudaConvnetPooling2DLayer(name='pool_1'))

        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=128,
                                        filter_size=3,
                                        untie_biases=True,
                                        name='conv_2'))
        model.add(
            kerasCudaConvnetConv2DLayer(n_filters=128,
                                        filter_size=3,
                                        weights_std=0.1,
                                        untie_biases=True,
                                        name='conv_3'))

        model.add(kerasCudaConvnetPooling2DLayer(name='pool_2'))

        model.add(fPermute((3, 0, 1, 2), name='cuda_out_perm'))

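        # Regroup the 4 rotations and the num_views view variations from the
        # batch axis into the feature axis, so each galaxy is one row again.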
        model.add(
            Lambda(function=kaggle_MultiRotMergeLayer_output,
                   output_shape=lambda x:
                   (x[0] // 4 // N_INPUT_VARIATION,
                    (x[1] * x[2] * x[3] * 4 * num_views)),
                   arguments={'num_views': num_views},
                   name='conv_out_merge'))

        model.add(Dropout(0.5))
        model.add(
            MaxoutDense(output_dim=2048,
                        nb_feature=2,
                        weights=dense_weight_init_values(
                            model.output_shape[-1], 2048, nb_feature=2),
                        name='maxout_0'))

        model.add(Dropout(0.5))
        model.add(
            MaxoutDense(output_dim=2048,
                        nb_feature=2,
                        weights=dense_weight_init_values(
                            model.output_shape[-1], 2048, nb_feature=2),
                        name='maxout_1'))

        model.add(Dropout(0.5))
        model.add(
            Dense(units=37,
                  activation='relu',
                  kernel_initializer=initializers.RandomNormal(stddev=0.01),
                  bias_initializer=initializers.Constant(value=0.1),
                  name='dense_output'))

        model_seq = model([input_tensor, input_tensor_45])

        CATEGORISED = False  # FIXME has to be implemented

        output_layer_norm = Lambda(function=OptimisedDivGalaxyOutput,
                                   output_shape=lambda x: x,
                                   arguments={
                                       'normalised': True,
                                       'categorised': CATEGORISED
                                   })(model_seq)
        output_layer_noNorm = Lambda(function=OptimisedDivGalaxyOutput,
                                     output_shape=lambda x: x,
                                     arguments={
                                         'normalised': False,
                                         'categorised': CATEGORISED
                                     })(model_seq)

        model_norm = Model(inputs=[input_tensor, input_tensor_45],
                           outputs=output_layer_norm,
                           name='full_model_norm')
        model_norm_metrics = Model(inputs=[input_tensor, input_tensor_45],
                                   outputs=output_layer_norm,
                                   name='full_model_metrics')
        model_noNorm = Model(inputs=[input_tensor, input_tensor_45],
                             outputs=output_layer_noNorm,
                             name='full_model_noNorm')

        self.models = {
            'model_norm': model_norm,
            'model_norm_metrics': model_norm_metrics,
            'model_noNorm': model_noNorm
        }

        self._compile_models()

        return self.models
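
fPermute is also external to these examples. Since Keras's own Permute layer
cannot touch the batch axis, here is a sketch of an equivalent layer,
assuming it permutes all dimensions including the batch one, as the
(1, 2, 3, 0) and (3, 0, 1, 2) patterns above require.

import keras.backend as K
from keras.engine.topology import Layer

class fPermute(Layer):
    def __init__(self, dims, **kwargs):
        self.dims = tuple(dims)
        super(fPermute, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        # reorder every axis, batch included
        return tuple(input_shape[d] for d in self.dims)

    def call(self, x):
        return K.permute_dimensions(x, self.dims)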
Example #4

model.add(kerasCudaConvnetPooling2DLayer())

if debug: print model.output_shape

model.add(fPermute((3, 0, 1, 2)))

if debug: print model.output_shape

model.add(
    Lambda(function=kaggle_MultiRotMergeLayer_output,
           output_shape=lambda x:
           (x[0] // 4 // N_INPUT_VARIATION,
            (x[1] * x[2] * x[3] * 4 * N_INPUT_VARIATION)),
           arguments={'num_views': N_INPUT_VARIATION}))

if debug: print model.output_shape

model.add(Dropout(0.5))
model.add(
    MaxoutDense(output_dim=2048,
                nb_feature=2,
                weights=dense_weight_init_values(model.output_shape[-1],
                                                 2048,
                                                 nb_feature=2)))
model.add(Dropout(0.5))

model.add(
    MaxoutDense(output_dim=2048,
                nb_feature=2,
                weights=dense_weight_init_values(model.output_shape[-1],
                                                 2048,
                                                 nb_feature=2)))
model.add(Dropout(0.5))

model.add(
    Dense(output_dim=37,
          weights=dense_weight_init_values(model.output_shape[-1],
                                           37,
                                           w_std=0.01,
                                           b_init_val=0.1)))

if debug: print model.output_shape

model_seq = model([input_tensor, input_tensor_45])

output_layer_norm = Lambda(function=OptimisedDivGalaxyOutput,
                           output_shape=lambda x: x,
                           arguments={
                               'normalised': True,
                               'categorised': CATEGORISED
                           })(model_seq)
output_layer_noNorm = Lambda(function=OptimisedDivGalaxyOutput,
                             output_shape=lambda x: x,
                             arguments={
                                 'normalised': False,
                                 'categorised': CATEGORISED
                             })(model_seq)

model_norm = Model(inputs=[input_tensor, input_tensor_45],
                   outputs=output_layer_norm)
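
For completeness, a sketch of the shape juggling that
kaggle_MultiRotMergeLayer_output appears to perform, judging from the
output_shape lambdas paired with it (an assumption; the real function ships
with the surrounding code): the 4 rotated copies times num_views variations
stacked along the batch axis are folded back into the feature axis.

import keras.backend as K

def kaggle_MultiRotMergeLayer_output(x, num_views):
    s = K.shape(x)
    feat = K.prod(s[1:])                               # flattened feature size
    x_r = K.reshape(x, (4 * num_views, -1, feat))      # (copies, mb, features)
    x_t = K.permute_dimensions(x_r, (1, 0, 2))         # (mb, copies, features)
    return K.reshape(x_t, (-1, 4 * num_views * feat))  # copies -> features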
Example #5

            'random_flip': True
        }))

if debug: print model.output_shape

model.add(Lambda(ev_translation, output_shape=(REDUCTION, )))

# needed for the pylearn modules used by kerasCudaConvnetConv2DLayer and
# kerasCudaConvnetPooling2DLayer

if debug: print model.output_shape

model.add(Dropout(0.5))
model.add(
    Dense(output_dim=2048,
          activation='tanh',
          weights=dense_weight_init_values(model.output_shape[-1], 2048)))

model.add(Dropout(0.5))
model.add(
    MaxoutDense(output_dim=2048,
                nb_feature=2,
                weights=dense_weight_init_values(model.output_shape[-1],
                                                 2048,
                                                 nb_feature=2)))
model.add(Dropout(0.5))

model.add(
    MaxoutDense(output_dim=2048,
                nb_feature=2,
                weights=dense_weight_init_values(model.output_shape[-1],
                                                 2048,