import copy

import paddle.fluid as fluid
from paddle.fluid.dygraph import Layer, Sequential, Conv2D, Linear, Pool2D

# NOTE: ConvBNLayer (a Conv2D + BatchNorm block) is assumed to be defined
# elsewhere in this module.


class LinConPoo(Layer):
    def __init__(self, sequence_list):
        super(LinConPoo, self).__init__()
        self.__sequence_list = copy.deepcopy(sequence_list)
        if not isinstance(self.__sequence_list, list):
            raise ValueError('`sequence_list` must be a list')

        self._layers_list = []
        LAYLIST = [ConvBNLayer, Conv2D, Linear, Pool2D]

        for i, layer_arg in enumerate(self.__sequence_list):
            # Each entry is either a dict of keyword arguments or a list of
            # positional arguments for the layer class it names.
            if isinstance(layer_arg, dict):
                layer_class = layer_arg.pop('type')
                if layer_class not in LAYLIST:
                    raise KeyError(
                        "every layer type in `sequence_list` must be one of "
                        "`[ConvBNLayer, Conv2D, Linear, Pool2D]`")
                layer_obj = layer_class(**layer_arg)
            elif isinstance(layer_arg, list):
                layer_class = layer_arg.pop(0)
                if layer_class not in LAYLIST:
                    raise KeyError(
                        "every layer type in `sequence_list` must be one of "
                        "`[ConvBNLayer, Conv2D, Linear, Pool2D]`")
                layer_obj = layer_class(*layer_arg)
            else:
                raise ValueError(
                    "every element of `sequence_list` must be a list or a dict")

            layer_name = layer_class.__name__ + str(i)
            self._layers_list.append((layer_name, layer_obj))

        # Build the Sequential container once from the (name, layer) pairs.
        self._layers_squence = Sequential(*self._layers_list)

    def forward(self, inputs, show_shape=False):
        if show_shape:
            # Debug path: run layer by layer and print each output shape.
            x = inputs
            for op in self._layers_list:
                x = op[1](x)
                print(op[0], '\t', x.shape)
            return x
        return self._layers_squence(inputs)
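# A minimal usage sketch, not part of the original module: it assumes PaddlePaddle
# 1.x dygraph mode and a float32 NCHW batch. It builds a tiny Conv2D + Pool2D stack
# from dict specs and runs it with `show_shape=True` to print per-layer shapes.
def _demo_lin_con_poo():
    import numpy as np
    demo_list = [
        {'type': Conv2D, 'num_channels': 3, 'num_filters': 8,
         'filter_size': 3, 'stride': 1, 'padding': 1, 'act': 'relu'},
        {'type': Pool2D, 'pool_size': 2, 'pool_type': 'max', 'pool_stride': 2},
    ]
    with fluid.dygraph.guard():
        net = LinConPoo(demo_list)
        x = fluid.dygraph.to_variable(np.ones((4, 3, 32, 32), dtype=np.float32))
        return net(x, show_shape=True)  # expected final shape: (4, 8, 16, 16)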
class ConvLSTM(fluid.dygraph.Layer):
    def __init__(self, in_channels, hidden_channels, kernel_size, num_layers,
                 batch_first=True, return_all_layers=False, training=True):
        super(ConvLSTM, self).__init__()

        self.cell_list = Sequential()

        self._check_kernel_size_consistency(kernel_size)

        # Make sure that both `kernel_size` and `hidden_channels` are lists
        # with len == num_layers.
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_channels = self._extend_for_multilayer(hidden_channels, num_layers)
        if not len(kernel_size) == len(hidden_channels) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.input_dim = in_channels
        self.hidden_dim = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.return_all_layers = return_all_layers

        for i in range(0, self.num_layers):
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            self.cell_list.add_sublayer(
                name='{}'.format(i),
                sublayer=ConvLSTMCell(in_channels=cur_input_dim,
                                      hidden_channels=self.hidden_dim[i],
                                      kernel_size=self.kernel_size[i],
                                      training=training))

    def forward(self, input_tensor, hidden_state=None):
        """
        Parameters
        ----------
        input_tensor:
            5-D Tensor of shape (b, t, c, h, w).
        hidden_state:
            Must be None for now; stateful ConvLSTM is not implemented yet.

        Returns
        -------
        layer_output_list, last_state_list
        """
        # TODO: implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            b, _, _, h, w = input_tensor.shape
            hidden_state = self._init_hidden(b, h, w)

        layer_output_list = []
        last_state_list = []

        seq_len = input_tensor.shape[1]
        cur_layer_input = input_tensor

        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]
            output_inner = []
            for t in range(seq_len):
                h, c = self.cell_list['{}'.format(layer_idx)](
                    input_tensor=cur_layer_input[:, t, :, :, :],
                    cur_state=[h, c])
                output_inner.append(h)

            layer_output = fluid.layers.stack(output_inner, axis=1)
            cur_layer_input = layer_output

            layer_output_list.append(layer_output)
            last_state_list.append([h, c])

        if not self.return_all_layers:
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]

        return layer_output_list, last_state_list

    def _init_hidden(self, b, h, w):
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list['{}'.format(i)].init_hidden(b, h, w))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and
                 all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
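# A minimal usage sketch, not part of the original module. It assumes a
# ConvLSTMCell implementation with the constructor and `init_hidden` signature
# used above is available in this module, and runs a dummy (b, t, c, h, w)
# tensor through a 2-layer ConvLSTM to inspect the output shapes.
def _demo_conv_lstm():
    import numpy as np
    with fluid.dygraph.guard():
        model = ConvLSTM(in_channels=3, hidden_channels=[8, 16],
                         kernel_size=(3, 3), num_layers=2,
                         return_all_layers=False)
        x = fluid.dygraph.to_variable(
            np.ones((2, 5, 3, 16, 16), dtype=np.float32))  # (b, t, c, h, w)
        layer_outputs, last_states = model(x)
        print(layer_outputs[0].shape)   # expected: [2, 5, 16, 16, 16]
        h, c = last_states[0]
        print(h.shape, c.shape)         # expected: [2, 16, 16, 16] each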
class LinConPoo(Layer):
    def __init__(self, sequence_list):
        '''
        @Brief
            Builds a custom network from a list describing its structure and
            returns it as a Layer model. In practice this class arranges
            combinations of `Conv2D`, `Pool2D` and `Linear`.

        @Parameters
            sequence_list : list describing the network structure; each element
                            is a dict or a list specifying the arguments of one layer.

        @Return
            The custom network model.

        @Examples
        ------------
        >>> # It can be used directly to build VGGNet:
        >>> VGG_list_part1 = [
                {'type':Conv2D, 'num_channels': 3, 'num_filters':64, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':64, 'num_filters':64, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Pool2D, 'pool_size':2, 'pool_type':'max', 'pool_stride':2, 'global_pooling':False},
                {'type':Conv2D, 'num_channels':64, 'num_filters':128, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':128, 'num_filters':128, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Pool2D, 'pool_size':2, 'pool_type':'max', 'pool_stride':2, 'global_pooling':False},
                {'type':Conv2D, 'num_channels':128, 'num_filters':256, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':256, 'num_filters':256, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':256, 'num_filters':256, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Pool2D, 'pool_size':2, 'pool_type':'max', 'pool_stride':2, 'global_pooling':False},
                {'type':Conv2D, 'num_channels':256, 'num_filters':512, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':512, 'num_filters':512, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':512, 'num_filters':512, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Pool2D, 'pool_size':2, 'pool_type':'max', 'pool_stride':2, 'global_pooling':False},
                {'type':Conv2D, 'num_channels':512, 'num_filters':512, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':512, 'num_filters':512, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Conv2D, 'num_channels':512, 'num_filters':512, 'filter_size':3, 'stride':1, 'padding':1, 'act':'relu', 'bias_attr':True},
                {'type':Pool2D, 'pool_size':2, 'pool_type':'max', 'pool_stride':2, 'global_pooling':False},
            ]
        >>> VGG_list_part2 = [
                {'type':Linear, 'input_dim': 512*7*7, 'output_dim':4096, 'act':'relu', 'bias_attr':True},
                {'type':Linear, 'input_dim':4096, 'output_dim':4096, 'act':'relu', 'bias_attr':True},
                {'type':Linear, 'input_dim':4096, 'output_dim':64, 'act':'relu', 'bias_attr':True},
            ]

        In the current paddle version 1.7, `paddle.fluid.layers.flatten` cannot
        be added to `paddle.fluid.dygraph.Sequential`, so we split VGGNet into
        two parts at the flatten step.

        >>> VGG_part1 = LinConPoo(VGG_list_part1)
        >>> VGG_part2 = LinConPoo(VGG_list_part2)
        >>> import numpy as np
        >>> data = np.ones(shape=(8, 3, 224, 224), dtype=np.float32)  # treat this data as a batch of images
        >>> with fluid.dygraph.guard():
        ...     data = to_variable(data)
        ...     x = VGG_part1(data, True)
        ...     x = fluid.layers.flatten(x)
        ...     x = VGG_part2(x, True)
        ...     print(x.numpy().shape)
        >>> # The above builds VGG16 by hand; you can also call the VGG16 class
        >>> # directly, which is a wrapper around `LinConPoo`.
        '''
        super(LinConPoo, self).__init__()
        self.__sequence_list = copy.deepcopy(sequence_list)

        # validate arguments
        if not isinstance(self.__sequence_list, list):
            raise ValueError('`sequence_list` must be a list')

        self._layers_list = []
        LAYLIST = [Conv2D, Linear, Pool2D]

        for i, layer_arg in enumerate(self.__sequence_list):
            # do not modify the caller's original dict or list
            # layer_arg = layer_arg.copy()

            # each entry may be a list or a dict
            if isinstance(layer_arg, dict):
                layer_class = layer_arg.pop('type')
                if layer_class not in LAYLIST:
                    # validity check
                    raise KeyError(
                        "every layer type in `sequence_list` must be one of "
                        "`[Conv2D, Linear, Pool2D]`")
                # instantiate the layer object
                layer_obj = layer_class(**layer_arg)
            elif isinstance(layer_arg, list):
                layer_class = layer_arg.pop(0)
                if layer_class not in LAYLIST:
                    # validity check
                    raise KeyError(
                        "every layer type in `sequence_list` must be one of "
                        "`[Conv2D, Linear, Pool2D]`")
                # instantiate the layer object
                layer_obj = layer_class(*layer_arg)
            else:
                raise ValueError(
                    "every element of `sequence_list` must be a list or a dict")

            # name this layer
            layer_name = layer_class.__name__ + str(i)

            # append each (name, layer) pair to `self._layers_list`
            self._layers_list.append((layer_name, layer_obj))

        # build the per-layer model sequence once from the (name, layer) pairs
        self._layers_squence = Sequential(*self._layers_list)

    def forward(self, inputs, show_shape=False):
        '''
        @Parameters :
            inputs     : input data
            show_shape : whether to print the output shape of every layer (for debugging)
        '''
        if show_shape:
            x = inputs
            for op in self._layers_list:
                x = op[1](x)
                print(op[0], '\t', x.shape)
            return x
        return self._layers_squence(inputs)
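# A minimal sketch, not part of the original module, showing the list
# (positional-argument) form of `sequence_list`; it complements the dict-based
# VGG example in the docstring above. The positional order follows
# Conv2D(num_channels, num_filters, filter_size, ...) and
# Pool2D(pool_size, pool_type, pool_stride).
def _demo_lin_con_poo_list_spec():
    import numpy as np
    spec = [
        [Conv2D, 1, 4, 3],      # Conv2D(num_channels=1, num_filters=4, filter_size=3)
        [Pool2D, 2, 'max', 2],  # Pool2D(pool_size=2, pool_type='max', pool_stride=2)
    ]
    with fluid.dygraph.guard():
        net = LinConPoo(spec)
        x = fluid.dygraph.to_variable(np.ones((2, 1, 28, 28), dtype=np.float32))
        return net(x, show_shape=True)  # expected final shape: (2, 4, 13, 13)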