Example #1
    def get_batch(self, data, step):
        batch_size = self.batch_size
        if data == 'train':
            # Slice this step's (x, y) pairs and unzip them into two tuples.
            x_batch, y_batch = zip(
                *self.train_data[step * batch_size:(step + 1) * batch_size])
            return K.asarray(x_batch, dtype=K.float32), K.asarray(y_batch)
        elif data == 'validation':
            x_batch, y_batch = zip(
                *self.val_data[step * batch_size:(step + 1) * batch_size])
            return K.asarray(x_batch, dtype=K.float32), K.asarray(y_batch)
        else:
            raise ValueError("data must be 'train' or 'validation'")
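For context, a get_batch like this is normally driven from a training loop that walks over step indices. The sketch below reproduces the same slicing logic in a standalone toy class, with NumPy standing in for the K backend; the ToyLoader name and the shapes are purely illustrative assumptions.

    import numpy as np

    class ToyLoader:
        # Hypothetical stand-in with the same slicing logic as above;
        # numpy plays the role of the K backend.
        def __init__(self, pairs, batch_size):
            self.train_data = pairs
            self.batch_size = batch_size

        def get_batch(self, step):
            lo, hi = step * self.batch_size, (step + 1) * self.batch_size
            x_batch, y_batch = zip(*self.train_data[lo:hi])
            return np.asarray(x_batch, dtype=np.float32), np.asarray(y_batch)

    pairs = [(np.random.randn(3, 8, 8), i % 10) for i in range(32)]
    loader = ToyLoader(pairs, batch_size=4)
    for step in range(len(pairs) // loader.batch_size):
        x, y = loader.get_batch(step)
        assert x.shape == (4, 3, 8, 8) and y.shape == (4,)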
Example #2
    def forward(self, x):
        """
        An implementation of the forward pass for max pooling based on im2col.

        This isn't much faster than the naive version, so it should be avoided
        if possible.
        """
        N, C, H, W = x.shape
        pool_height, pool_width = self.pool_size
        stride = self.stride

        out_height = int((H - pool_height) // stride + 1)
        out_width = int((W - pool_width) // stride + 1)

        x_split = x.reshape(N * C, 1, H, W)
        x_split_cpu = K.get_cpu_array(x_split)
        x_cols = K.asarray(
            im2col(x_split_cpu, pool_height, pool_width, 0, stride))
        x_cols_argmax = x_cols.argmax(axis=0)
        x_cols_max = x_cols[x_cols_argmax, K.arange(x_cols.shape[1])]
        out = x_cols_max.reshape(
            out_height, out_width, N, C).transpose(2, 3, 0, 1)

        self.x, self.x_cols, self.x_cols_argmax = x, x_cols, x_cols_argmax

        return out
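The docstring contrasts this with a naive version. For reference, a naive max-pool forward that computes the same result under the same assumptions (NCHW input, no padding) can be written directly with NumPy; this sketch is only for comparison and uses NumPy in place of the K backend.

    import numpy as np

    def max_pool_forward_naive(x, pool_height, pool_width, stride):
        # x: (N, C, H, W) -> (N, C, out_h, out_w), max over each window.
        N, C, H, W = x.shape
        out_h = (H - pool_height) // stride + 1
        out_w = (W - pool_width) // stride + 1
        out = np.empty((N, C, out_h, out_w), dtype=x.dtype)
        for i in range(out_h):
            for j in range(out_w):
                window = x[:, :, i * stride:i * stride + pool_height,
                           j * stride:j * stride + pool_width]
                out[:, :, i, j] = window.max(axis=(2, 3))
        return out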
Example #3
    def get_batch(self, data, step):
        if data == 'train':
            x_batch, y_batch = next(self.train_generator)
        elif data == 'validation':
            x_batch, y_batch = next(self.val_generator)
        else:
            raise ValueError("data must be 'train' or 'validation'")
        if self.data_format == 'channels_last':
            # Generator yields NHWC batches; move channels up front (NCHW).
            x_batch = x_batch.transpose(0, 3, 1, 2)
        return K.asarray(x_batch, dtype=K.float32), K.asarray(y_batch)
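The transpose(0, 3, 1, 2) converts batches from channels-last (NHWC), as produced by the generator, into the channels-first (NCHW) layout used elsewhere in these examples. A quick shape check with NumPy:

    import numpy as np

    x_nhwc = np.zeros((32, 224, 224, 3))   # channels-last batch from a generator
    x_nchw = x_nhwc.transpose(0, 3, 1, 2)  # channels-first for the layers
    assert x_nchw.shape == (32, 3, 224, 224)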
Example #4
    def _im2col(self, x):
        w = self.params['w']
        pad, stride = self.pad, self.stride
        _, _, filter_height, filter_width = w.shape

        # For 1x1 filters with no padding and unit stride, im2col reduces to a
        # reshape, so skip the expensive patch extraction.
        if filter_height == 1 and filter_width == 1 and pad == 0 and stride == 1:
            return x.transpose(1, 2, 3, 0).reshape(x.shape[1], -1)
        else:
            x_cpu = K.get_cpu_array(x)
            return K.asarray(
                im2col(x_cpu, filter_height, filter_width, pad, stride))
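In an im2col-based convolution, the column matrix returned by a helper like this is multiplied by the flattened filter bank to produce the output feature maps. The standalone sketch below shows that step with an explicit (slow) im2col loop; the im2col_naive function, its column ordering, and the shapes are illustrative assumptions, not this library's API.

    import numpy as np

    def im2col_naive(x, fh, fw, pad, stride):
        # Gather every (fh, fw) patch into one column of shape (C*fh*fw,).
        N, C, H, W = x.shape
        x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)))
        out_h = (H + 2 * pad - fh) // stride + 1
        out_w = (W + 2 * pad - fw) // stride + 1
        cols = np.empty((C * fh * fw, N * out_h * out_w), dtype=x.dtype)
        col = 0
        for n in range(N):                      # columns ordered (n, i, j)
            for i in range(out_h):
                for j in range(out_w):
                    patch = x[n, :, i * stride:i * stride + fh,
                              j * stride:j * stride + fw]
                    cols[:, col] = patch.ravel()
                    col += 1
        return cols

    # Convolution forward as a single matrix multiply over the columns.
    N, C, H, W, F, fh, fw = 2, 3, 8, 8, 4, 3, 3
    x = np.random.randn(N, C, H, W)
    w = np.random.randn(F, C, fh, fw)
    cols = im2col_naive(x, fh, fw, pad=1, stride=1)          # (C*fh*fw, N*8*8)
    out = (w.reshape(F, -1) @ cols).reshape(F, N, 8, 8).transpose(1, 0, 2, 3)
    assert out.shape == (N, F, 8, 8)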
Example #5
    def _col2im(self, dx_cols):
        w, stride, pad = self.params['w'], self.stride, self.pad
        _, _, filter_height, filter_width = w.shape
        N, C, H, W = self.x_shape

        # Mirror of _im2col: the 1x1 shortcut is only valid for pad 0, stride 1.
        if filter_height == 1 and filter_width == 1 and pad == 0 and stride == 1:
            return dx_cols.reshape(C, H, W, N).transpose(3, 0, 1, 2)
        else:
            dx_cols_cpu = K.get_cpu_array(dx_cols)
            return K.asarray(
                col2im(dx_cols_cpu, N, C, H, W, filter_height, filter_width,
                       pad, stride))
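In the backward pass, _col2im is the inverse step: the gradient with respect to the columns comes from multiplying the transposed, flattened filters against the upstream gradient, and col2im then scatter-adds those per-patch gradients back into an (N, C, H, W) tensor. A shapes-only sketch of that first multiply, using the same illustrative column ordering as the sketch above:

    import numpy as np

    N, C, H, W, F, fh, fw = 2, 3, 8, 8, 4, 3, 3
    out_h = out_w = 8                                        # pad=1, stride=1
    w = np.random.randn(F, C, fh, fw)
    dout = np.random.randn(N, F, out_h, out_w)

    # Transpose of the forward matmul: dX_cols = W_flat.T @ dOut_mat.
    dout_mat = dout.transpose(1, 0, 2, 3).reshape(F, -1)     # (F, N*out_h*out_w)
    dx_cols = w.reshape(F, -1).T @ dout_mat                  # (C*fh*fw, N*out_h*out_w)
    # dx_cols is what would then be handed to _col2im to rebuild dx.
    assert dx_cols.shape == (C * fh * fw, N * out_h * out_w)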
Example #6
    def backward(self, dout):
        """
        An implementation of the backward pass for max pooling based on im2col.

        This isn't much faster than the naive version, so it should be avoided
        if possible.
        """
        x, x_cols, x_cols_argmax = self.x, self.x_cols, self.x_cols_argmax
        N, C, H, W = x.shape
        pool_height, pool_width = self.pool_size
        stride = self.stride

        dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
        dx_cols = K.zeros_like(x_cols)
        dx_cols[x_cols_argmax, K.arange(dx_cols.shape[1])] = dout_reshaped
        dx_cols_cpu = K.get_cpu_array(dx_cols)
        dx = K.asarray(
            col2im(dx_cols_cpu, (N * C), 1, H, W, pool_height, pool_width,
                   0, stride))
        dx = dx.reshape(x.shape)

        return dx
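The key step above is the fancy-indexed scatter into dx_cols: each column of x_cols is one pooling window, and only the element that won the argmax in the forward pass receives that window's upstream gradient. A tiny standalone NumPy illustration of that indexing (values chosen arbitrarily):

    import numpy as np

    x_cols = np.array([[1., 5., 2.],
                       [4., 0., 7.]])           # 2-element windows, 3 windows
    argmax = x_cols.argmax(axis=0)              # -> array([1, 0, 1])
    dout = np.array([10., 20., 30.])            # one upstream gradient per window
    dx_cols = np.zeros_like(x_cols)
    dx_cols[argmax, np.arange(x_cols.shape[1])] = dout
    # dx_cols == [[ 0., 20.,  0.],
    #             [10.,  0., 30.]]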