Example #1
Builds a two-convolutional-layer pylearn2 network for SAR imagery, trains it with SGD and dropout, saves the best model, and runs prediction.
import time

from pylearn2 import space
from pylearn2.costs.mlp import dropout
from pylearn2.models import maxout, mlp
from pylearn2.termination_criteria import EpochCounter
from pylearn2.train import Train
from pylearn2.train_extensions import best_params
from pylearn2.training_algorithms import sgd
from pylearn2.utils import serial


def cnn_run_dropout_maxout(data_path, num_rows, num_cols, num_channels,
                           input_path, pred_path):
    t = time.time()
    # gen_center_sub_window, SarDataset, read_hhv and sar_predict are
    # project-specific helpers; `ds` (the train/valid/test splits) is
    # assumed to be prepared elsewhere, e.g. loaded from data_path.
    sub_window = gen_center_sub_window(76, num_cols)
    trn = SarDataset(ds[0][0], ds[0][1], sub_window)
    vld = SarDataset(ds[1][0], ds[1][1], sub_window)
    tst = SarDataset(ds[2][0], ds[2][1], sub_window)
    print('Took {}s to read data'.format(time.time() - t))
    t = time.time()
    batch_size = 100
    # This Maxout layer is never added to `layers` below (and its
    # layer_name duplicates hidden_layer's), so it is left out:
    # h1 = maxout.Maxout(layer_name='h2', num_units=1, num_pieces=100, irange=.1)
    hidden_layer = mlp.ConvRectifiedLinear(layer_name='h2',
                                           output_channels=8,
                                           irange=0.05,
                                           kernel_shape=[5, 5],
                                           pool_shape=[2, 2],
                                           pool_stride=[2, 2],
                                           max_kernel_norm=1.9365)
    hidden_layer2 = mlp.ConvRectifiedLinear(layer_name='h3',
                                            output_channels=8,
                                            irange=0.05,
                                            kernel_shape=[5, 5],
                                            pool_shape=[2, 2],
                                            pool_stride=[2, 2],
                                            max_kernel_norm=1.9365)
    # output_layer = mlp.Softplus(dim=1, layer_name='output', irange=0.1)
    output_layer = mlp.Linear(dim=1, layer_name='output', irange=0.05)
    trainer = sgd.SGD(learning_rate=0.001,
                      batch_size=batch_size,
                      termination_criterion=EpochCounter(2000),
                      cost=dropout.Dropout(),
                      train_iteration_mode='even_shuffled_sequential',
                      monitor_iteration_mode='even_shuffled_sequential',
                      monitoring_dataset={
                          'test': tst,
                          'valid': vld,
                          'train': trn
                      })
    layers = [hidden_layer, hidden_layer2, output_layer]
    input_space = space.Conv2DSpace(shape=[num_rows, num_cols],
                                    num_channels=num_channels)

    ann = mlp.MLP(layers, input_space=input_space, batch_size=batch_size)
    watcher = best_params.MonitorBasedSaveBest(channel_name='valid_objective',
                                               save_path='sar_cnn_mlp.pkl')
    experiment = Train(dataset=trn,
                       model=ann,
                       algorithm=trainer,
                       extensions=[watcher])
    print('Took {}s to compile code'.format(time.time() - t))
    t = time.time()
    experiment.main_loop()
    print('Training time: {}s'.format(time.time() - t))
    serial.save('cnn_hhv_{0}_{1}.pkl'.format(num_rows, num_cols),
                ann,
                on_overwrite='backup')

    # Read the HH and HV polarization channels into a 3-D numpy array.
    image = read_hhv(input_path)
    return ann, sar_predict(ann, image, pred_path)
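
A minimal invocation sketch; every argument below is a placeholder value, not one taken from the original script:

# Hypothetical usage; all paths and dimensions are placeholders.
ann, prediction = cnn_run_dropout_maxout(data_path='data/sar/',
                                         num_rows=76,
                                         num_cols=76,
                                         num_channels=2,
                                         input_path='data/sar/scene_hhv.dat',
                                         pred_path='out/prediction.dat')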
Example #2
A factory method that maps a backend-agnostic layer specification onto the matching pylearn2 layer class.
    def _create_layer(self, name, layer, irange):
        # Convolutions are dispatched to a dedicated factory.
        if isinstance(layer, Convolution):
            return self._create_convolution_layer(name, layer, irange)

        if layer.type == "Rectifier":
            self._check_layer(layer, ['units'])
            return mlp.RectifiedLinear(layer_name=name,
                                       dim=layer.units,
                                       irange=irange)

        if layer.type == "Sigmoid":
            self._check_layer(layer, ['units'])
            return mlp.Sigmoid(layer_name=name, dim=layer.units, irange=irange)

        if layer.type == "Tanh":
            self._check_layer(layer, ['units'])
            return mlp.Tanh(layer_name=name, dim=layer.units, irange=irange)

        if layer.type == "Maxout":
            self._check_layer(layer, ['units', 'pieces'])
            return maxout.Maxout(layer_name=name,
                                 num_units=layer.units,
                                 num_pieces=layer.pieces,
                                 irange=irange)

        if layer.type == "Linear":
            self._check_layer(layer, ['units'])
            return mlp.Linear(layer_name=layer.name,
                              dim=layer.units,
                              irange=irange)

        if layer.type == "Gaussian":
            self._check_layer(layer, ['units'])
            return mlp.LinearGaussian(layer_name=layer.name,
                                      init_beta=0.1,
                                      min_beta=0.001,
                                      max_beta=1000,
                                      beta_lr_scale=None,
                                      dim=layer.units,
                                      irange=irange)

        if layer.type == "Softmax":
            self._check_layer(layer, ['units'])
            return mlp.Softmax(layer_name=layer.name,
                               n_classes=layer.units,
                               irange=irange)
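
A sketch of how this factory might be driven, assuming a minimal stand-in for the library's layer specification; the real spec class and the `backend` instance owning _create_layer live elsewhere and are hypothetical here:

# Hypothetical stand-in for the library's layer specification.
class LayerSpec(object):
    def __init__(self, type, units, pieces=None, name=None):
        self.type = type
        self.units = units
        self.pieces = pieces
        self.name = name

# `backend` is assumed to be an instance of the class defining _create_layer.
hidden = backend._create_layer('hidden0', LayerSpec('Rectifier', 300), 0.05)
output = backend._create_layer('output', LayerSpec('Softmax', 10), 0.05)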
Example #3
Lazy initialisation of a pylearn2 MLP inside a scikit-learn-style wrapper: layer sizes are derived from the data, so the model is only built once X and y are available (uses numpy, theano and pylearn2's DenseDesignMatrix).
    def __linit(self, X, y):
        if self.verbose > 0:
            print("Lazy initialisation")

        layers = self.layers
        pylearn2mlp_layers = []
        self.units_per_layer = []
        # Input layer units.
        self.units_per_layer += [X.shape[1]]

        for layer in layers[:-1]:
            self.units_per_layer += [layer[1]]

        # Output layer units.
        self.units_per_layer += [y.shape[1]]

        if self.verbose > 0:
            print("Units per layer", str(self.units_per_layer))

        for i, layer in enumerate(layers[:-1]):

            fan_in = self.units_per_layer[i] + 1
            fan_out = self.units_per_layer[i + 1]
            # Glorot-style initialisation range.
            lim = np.sqrt(6.0 / (fan_in + fan_out))
            layer_name = "Hidden_%i_%s" % (i, layer[0])
            activate_type = layer[0]
            if activate_type == "RectifiedLinear":
                hidden_layer = mlp.RectifiedLinear(dim=layer[1],
                                                   layer_name=layer_name,
                                                   irange=lim)
            elif activate_type == "Sigmoid":
                hidden_layer = mlp.Sigmoid(dim=layer[1],
                                           layer_name=layer_name,
                                           irange=lim)
            elif activate_type == "Tanh":
                hidden_layer = mlp.Tanh(dim=layer[1],
                                        layer_name=layer_name,
                                        irange=lim)
            elif activate_type == "Maxout":
                hidden_layer = maxout.Maxout(num_units=layer[1],
                                             num_pieces=layer[2],
                                             layer_name=layer_name,
                                             irange=lim)
            else:
                raise NotImplementedError(
                    "Layers of type %s are not implemented yet" % layer[0])
            pylearn2mlp_layers += [hidden_layer]

        output_layer_info = layers[-1]
        output_layer_name = "Output_%s" % output_layer_info[0]

        fan_in = self.units_per_layer[-2] + 1
        fan_out = self.units_per_layer[-1]
        lim = np.sqrt(6.0 / (fan_in + fan_out))

        if output_layer_info[0] == "Linear":
            output_layer = mlp.Linear(dim=self.units_per_layer[-1],
                                      layer_name=output_layer_name,
                                      irange=lim)
            pylearn2mlp_layers += [output_layer]
        else:
            raise NotImplementedError(
                "Output layers of type %s are not implemented yet"
                % output_layer_info[0])

        self.mlp = mlp.MLP(pylearn2mlp_layers, nvis=self.units_per_layer[0])
        self.ds = DenseDesignMatrix(X=X, y=y)
        self.trainer.setup(self.mlp, self.ds)
        inputs = self.mlp.get_input_space().make_theano_batch()
        self.f = theano.function([inputs], self.mlp.fprop(inputs))
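
Once __linit has compiled self.f, prediction is a single function call; a hedged sketch of a companion predict method (hypothetical, not part of the original wrapper):

    # Hypothetical companion method: forward-propagate a design matrix
    # through the Theano function compiled in __linit.
    def predict(self, X):
        return self.f(X.astype(theano.config.floatX))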
Example #4
Excerpt from a maxout convolutional network for 10-class classification; l1, l2 and in_space are defined earlier in the original script, outside this excerpt.
l3 = maxout.MaxoutConvC01B(layer_name='l3',
                           pad=3,
                           tied_b=1,
                           W_lr_scale=.05,
                           b_lr_scale=.05,
                           num_channels=192,
                           num_pieces=2,
                           kernel_shape=(5, 5),
                           pool_shape=(2, 2),
                           pool_stride=(2, 2),
                           irange=.005,
                           max_kernel_norm=1.9365)

l4 = maxout.Maxout(layer_name='l4',
                   irange=.005,
                   num_units=500,
                   num_pieces=5,
                   max_col_norm=1.9)

output = mlp.Softmax(layer_name='y',
                     n_classes=10,
                     irange=.005,
                     max_col_norm=1.9365)

layers = [l1, l2, l3, l4, output]

mdl = mlp.MLP(layers,
              input_space=in_space)

trainer = sgd.SGD(learning_rate=.17,
                  batch_size=128,