def __init__(self, in_channels, num_class, graph_cfg,
             edge_importance_weighting=True, data_bn=True, **kwargs):
    """Build the ST-GCN backbone plus the ALN head and mixing weights.

    Args:
        in_channels: number of input channels per joint (e.g. coordinates).
        num_class: number of output classes for the final 1x1 conv head.
        graph_cfg: kwargs forwarded to ``Graph`` to build the skeleton graph.
        edge_importance_weighting: accepted for API compatibility; not used
            in this variant (NOTE(review): parameter is ignored here).
        data_bn: when False, replaces the input BatchNorm1d with ``iden``
            (identity, defined elsewhere in this module).
        **kwargs: forwarded to every ``st_gcn_block``; the first block gets a
            copy with 'dropout' removed.
    """
    super().__init__()

    # load graph: adjacency A has shape (K, V, V) per the standard ST-GCN
    # convention -- TODO confirm against the Graph implementation.
    self.graph = Graph(**graph_cfg)
    A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
    self.register_buffer('A', A)

    # build networks
    spatial_kernel_size = 4
    temporal_kernel_size = 9
    kernel_size = (temporal_kernel_size, spatial_kernel_size)
    self.data_bn = nn.BatchNorm1d(in_channels * A.size(1)) if data_bn else iden
    # The first block gets kwargs without 'dropout'.
    kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
    self.st_gcn_networks = nn.ModuleList((
        st_gcn_block(in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 128, kernel_size, 2, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 256, kernel_size, 2, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
    ))

    # Two-way mixing weights, initialised to an even 0.5/0.5 split.
    # BUG FIX: the original did nn.Parameter(...).cuda(), which returns a
    # plain non-leaf tensor -- it was never registered in self.parameters()
    # (so never optimised, never moved by .to(device)), and the in-place
    # element writes targeted a tensor requiring grad. Registering the
    # parameter directly fixes both; .cuda()/.to() on the module moves it.
    self.M_weight = nn.Parameter(torch.full((2,), 0.5))

    # fcn for prediction
    self.fcn = nn.Conv2d(256, num_class, kernel_size=1)
    self.ALN = ANet(375, 1500, 625 * 4)

    # 1x1 conv that mixes two input channels; weights initialised to 0.5/0.5
    # (equivalent to the original ones-tensor with both entries overwritten).
    self.convm = torch.nn.Conv1d(in_channels=2, out_channels=1, kernel_size=1)
    self.convm.weight = torch.nn.Parameter(torch.full((1, 2, 1), 0.5))
def __init__(self, config: StgConfig, graph_cfg,
             edge_importance_weighting=True, data_bn=True, **kwargs):
    """Build a config-driven ST-GCN: one ``StGcnBlock`` per entry in
    ``config.layers``, plus optional per-block edge-importance weights.

    Args:
        config: StgConfig providing ``temporal_kernel_size`` and a ``layers``
            sequence whose entries carry ``in_channels``, ``out_channels``,
            ``temporal_stride`` and ``is_residual``.
        graph_cfg: kwargs forwarded to ``Graph`` to build the skeleton graph.
        edge_importance_weighting: if True, one learnable mask of shape
            ``A.size()`` per block; otherwise the constant 1 is used.
        data_bn: when False, replaces the input BatchNorm1d with ``iden``
            (identity, defined elsewhere in this module).
        **kwargs: forwarded (minus 'dropout') to every block.
    """
    super().__init__()

    # load graph
    self.graph = Graph(**graph_cfg)
    A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
    self.register_buffer('A', A)
    self.n_in_keypoints = A.size(1)

    # build networks: spatial kernel size is tied to the number of
    # adjacency partitions, temporal kernel size comes from the config.
    spatial_kernel_size = A.size(0)
    temporal_kernel_size = config.temporal_kernel_size
    kernel_size = (temporal_kernel_size, spatial_kernel_size)
    in_channels = config.layers[0].in_channels
    self.data_bn = nn.BatchNorm1d(in_channels * A.size(1)) if data_bn else iden
    kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
    # (The old hand-written 9-block ModuleList that used to live here,
    # commented out, has been deleted; the config-driven list replaced it.)
    self.st_gcn_networks = nn.ModuleList([
        StGcnBlock(in_channels=layer.in_channels,
                   out_channels=layer.out_channels,
                   kernel_size=kernel_size,
                   stride=layer.temporal_stride,
                   residual=layer.is_residual,
                   **kwargs0)
        for layer in config.layers
    ])

    # initialize parameters for edge importance weighting
    if edge_importance_weighting:
        self.edge_importance = nn.ParameterList([
            nn.Parameter(torch.ones(self.A.size()))
            for _ in self.st_gcn_networks
        ])
    else:
        # Plain Python ints: multiplying by 1 in forward is a no-op.
        self.edge_importance = [1] * len(self.st_gcn_networks)
def __init__(self, in_channels, num_class, graph_cfg,
             edge_importance_weighting=True, data_bn=True, **kwargs):
    """Build the ST-GCN backbone with one ``ANet`` attention module per
    st-gcn block and a 1x1-conv classification head.

    Args:
        in_channels: number of input channels per joint.
        num_class: number of output classes for the final 1x1 conv head.
        graph_cfg: kwargs forwarded to ``Graph`` to build the skeleton graph.
        edge_importance_weighting: accepted for API compatibility; not used
            in this variant (NOTE(review): parameter is ignored here).
        data_bn: when False, replaces the input BatchNorm1d with ``iden``
            (identity, defined elsewhere in this module).
        **kwargs: forwarded to every ``st_gcn_block``; the first block gets a
            copy with 'dropout' removed.
    """
    super().__init__()

    # load graph
    self.graph = Graph(**graph_cfg)
    A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
    self.register_buffer('A', A)

    # build networks
    spatial_kernel_size = 4
    temporal_kernel_size = 9
    kernel_size = (temporal_kernel_size, spatial_kernel_size)
    self.data_bn = nn.BatchNorm1d(in_channels * A.size(1)) if data_bn else iden
    kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}
    self.st_gcn_networks = nn.ModuleList((
        st_gcn_block(in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 128, kernel_size, 2, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 256, kernel_size, 2, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
    ))

    # One ANet per st-gcn block. The original wrote the ten identical
    # constructions out by hand; building them from the block list keeps the
    # count in sync if the backbone ever changes.
    self.ALNS = nn.ModuleList([
        ANet(375, 1500, 625 * 4) for _ in self.st_gcn_networks
    ])

    # fcn for prediction
    self.fcn = nn.Conv2d(256, num_class, kernel_size=1)
def __init__(self, in_channels, num_class, graph_cfg,
             edge_importance_weighting=True, data_bn=True, **kwargs):
    """Build a compact, ordinal-output ST-GCN (5 blocks, 16->64 channels,
    single-channel 1x1-conv head) with optional edge-importance weighting.

    Args:
        in_channels: input channels per joint.
        num_class: accepted for API compatibility; the prediction head emits
            a single channel regardless.
        graph_cfg: kwargs for ``Graph``.
        edge_importance_weighting: one learnable mask per block when True,
            otherwise the constant 1.
        data_bn: when False an identity function replaces the input
            BatchNorm1d.
        **kwargs: forwarded to every block; the first block receives a copy
            without 'dropout'.
    """
    super().__init__()
    print('In ST_GCN_18 ordinal: ', graph_cfg)

    # Skeleton graph and its (frozen) adjacency tensor.
    self.graph = Graph(**graph_cfg)
    adjacency = torch.tensor(self.graph.A, dtype=torch.float32,
                             requires_grad=False)
    self.register_buffer('A', adjacency)

    # Kernel: 15 frames temporally, one tap per adjacency partition spatially.
    kernel_size = (15, adjacency.size(0))

    if data_bn:
        self.data_bn = nn.BatchNorm1d(in_channels * adjacency.size(1))
    else:
        self.data_bn = lambda x: x

    # First block never uses dropout.
    head_kwargs = dict(kwargs)
    head_kwargs.pop('dropout', None)

    blocks = [st_gcn_block(in_channels, 16, kernel_size, 1,
                           residual=False, **head_kwargs)]
    for c_in, c_out, stride in ((16, 16, 1), (16, 32, 2),
                                (32, 32, 1), (32, 64, 2)):
        blocks.append(st_gcn_block(c_in, c_out, kernel_size, stride, **kwargs))
    self.st_gcn_networks = nn.ModuleList(blocks)

    # Learnable per-block edge masks, or the multiplicative identity.
    if edge_importance_weighting:
        self.edge_importance = nn.ParameterList(
            nn.Parameter(torch.ones(self.A.size()))
            for _ in self.st_gcn_networks
        )
    else:
        self.edge_importance = [1] * len(self.st_gcn_networks)

    # Single-channel prediction head (ordinal output).
    self.fcn = nn.Conv2d(64, 1, kernel_size=1)
def __init__(self, in_channels, num_class, graph_cfg,
             edge_importance_weighting=True, **kwargs):
    """Build a two-stream (position + motion) graph network with a shift
    branch, fused tcn/gcn stages, a cSE block and a 1x1-conv head.

    Args:
        in_channels: input channels per joint for both input streams.
        num_class: number of classes for the final 1x1 conv.
        graph_cfg: kwargs for ``Graph``.
        edge_importance_weighting: accepted for API compatibility;
            NOTE(review): not used by this constructor.
        **kwargs: accepted for API compatibility; not used here.
    """
    super().__init__()

    # Skeleton graph; adjacency is registered as a frozen buffer and also
    # handed to every gcn layer at construction time.
    self.graph = Graph(**graph_cfg)
    adjacency = torch.tensor(self.graph.A, dtype=torch.float32,
                             requires_grad=False)
    self.register_buffer('A', adjacency)

    # Input normalisation over (channels x joints).
    self.data_bn = nn.BatchNorm1d(in_channels * adjacency.size(1))

    # Shift branch: 1x1 in-projection, two gcn stages around a tcn,
    # 1x1 back-projection to 3 channels.
    self.conv_shift_1 = nn.Conv2d(3, 32, 1)
    self.gcn_shift_1 = gcn(32, 32, 3, adjacency)
    self.tcn_shift = tcn(32, 64, 3, 1, 1)
    self.gcn_shift_2 = gcn(64, 64, 3, adjacency)
    self.conv_shift_2 = nn.Conv2d(64, 3, 1)

    # Separate temporal in-projections for the position and motion streams.
    self.tcn_pos_in = tcn(in_channels, 64, 3, 1, 1)
    self.tcn_motion_in = tcn(in_channels, 64, 3, 1, 1)

    # Main trunk: five temporal stages (the middle one is a gcn) paired with
    # five graph stages that consume 128-channel fused features.
    self.tcn = nn.ModuleList([
        tcn(64, 64, 3, 1, 1),
        tcn(64, 64, 3, 2, 1),
        gcn(64, 64, 3, adjacency),
        tcn(64, 64, 3, 1, 1),
        tcn(64, 64, 3, 3, 1),
    ])
    self.gcn = nn.ModuleList(
        [gcn(128, 64, 3, adjacency) for _ in range(4)]
        + [gcn(128, 128, 3, adjacency)]
    )

    # Final graph stage, prediction head, channel attention, step counter.
    self.gcn_end = gcn(128, 256, 3, adjacency)
    self.fcn = nn.Conv2d(256, num_class, kernel_size=1)
    self.cSE_t = cSE(128)
    self.i = 0
def __init__(self, in_channels, num_class, graph_cfg,
             edge_importance_weighting=True, **kwargs):
    """Build an ST-GCN trunk (starting at 64 channels) plus a shift branch
    and two-stream (position/motion) 1x1 input projections.

    Note: the trunk deliberately has no ``in_channels -> 64`` block; the
    input is presumably projected to 64 channels by ``tcn_pos_in`` /
    ``tcn_motion_in`` before entering the trunk -- confirm in forward().

    Args:
        in_channels: input channels per joint for the 1x1 in-projections.
        num_class: number of classes for the final 1x1 conv head.
        graph_cfg: kwargs forwarded to ``Graph``.
        edge_importance_weighting: one learnable mask per block when True,
            otherwise the constant 1.
        **kwargs: forwarded to every trunk ``st_gcn_block``.
    """
    super().__init__()

    # load graph
    self.graph = Graph(**graph_cfg)
    A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
    self.register_buffer('A', A)

    # build networks
    spatial_kernel_size = A.size(0)
    temporal_kernel_size = 9
    kernel_size = (temporal_kernel_size, spatial_kernel_size)
    self.data_bn = nn.BatchNorm1d(in_channels * A.size(1))
    # (Removed: a commented-out in_channels->64 first block and the kwargs0
    # dict that only it used.)
    self.st_gcn_networks = nn.ModuleList((
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 128, kernel_size, 2, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 256, kernel_size, 2, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
    ))

    # initialize parameters for edge importance weighting
    if edge_importance_weighting:
        self.edge_importance = nn.ParameterList([
            nn.Parameter(torch.ones(self.A.size()))
            for _ in self.st_gcn_networks
        ])
    else:
        self.edge_importance = [1] * len(self.st_gcn_networks)

    # Shift branch: 1x1 projection in, two st-gcn stages with their own
    # learnable adjacency masks, 1x1 projection back to 3 channels.
    self.conv_shift_1 = nn.Conv2d(3, 64, 1)
    self.gcn_shift_1 = st_gcn_block(64, 128, kernel_size, 1)
    self.gcn_shift_2 = st_gcn_block(128, 128, kernel_size, 1)
    self.conv_shift_2 = nn.Conv2d(128, 3, 1)
    self.A_shift_1 = nn.Parameter(torch.ones(self.A.size()))
    self.A_shift_2 = nn.Parameter(torch.ones(self.A.size()))

    # Two-stream 1x1 input projections and their fusion conv.
    self.tcn_motion_in = nn.Conv2d(in_channels, 64, 1)
    self.tcn_pos_in = nn.Conv2d(in_channels, 64, 1)
    self.conv_fusion_in = nn.Conv2d(128, 64, 1)

    # fcn for prediction
    self.fcn = nn.Conv2d(256, num_class, kernel_size=1)
def __init__(self, in_channels, num_class, graph_cfg,
             edge_importance_weighting=True, data_bn=True, **kwargs):
    """Build a dual-branch model: an ALN branch ("_A", with an ANet module)
    and a plain ST-GCN branch ("_S", blocks built with ``isStgcn=True``),
    each with its own per-block edge-importance masks and 1x1-conv head.

    Args:
        in_channels: input channels per joint for both branches.
        num_class: number of classes for each branch's head.
        graph_cfg: kwargs forwarded to ``Graph``.
        edge_importance_weighting: accepted for API compatibility;
            NOTE(review): ignored -- both branches always create their
            edge-importance ParameterLists.
        data_bn: when False, replaces the input BatchNorm1d with ``iden``
            (identity, defined elsewhere in this module).
        **kwargs: forwarded to every block; first blocks get a copy with
            'dropout' removed.
    """
    super().__init__()

    # load graph
    self.graph = Graph(**graph_cfg)
    A = torch.tensor(self.graph.A, dtype=torch.float32, requires_grad=False)
    self.register_buffer('A', A)

    # build networks
    spatial_kernel_size = 3
    temporal_kernel_size = 9
    kernel_size = (temporal_kernel_size, spatial_kernel_size)
    self.data_bn = nn.BatchNorm1d(in_channels * A.size(1)) if data_bn else iden
    # Shared by both branches (the original recomputed this identically
    # before the STGCN branch; the duplicate has been removed).
    kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'}

    # ALN branch
    self.st_gcn_networks_A = nn.ModuleList((
        st_gcn_block(in_channels, 64, kernel_size, 1, residual=False, **kwargs0),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 64, kernel_size, 1, **kwargs),
        st_gcn_block(64, 128, kernel_size, 2, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 128, kernel_size, 1, **kwargs),
        st_gcn_block(128, 256, kernel_size, 2, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
        st_gcn_block(256, 256, kernel_size, 1, **kwargs),
    ))
    # One learnable (K, V, V) mask per block. Node count taken from the
    # graph instead of the original hard-coded 25 (identical when V == 25).
    num_nodes = A.size(1)
    self.edge_importance_A = nn.ParameterList([
        nn.Parameter(torch.ones(spatial_kernel_size, num_nodes, num_nodes))
        for _ in self.st_gcn_networks_A
    ])
    self.fcn_A = nn.Conv2d(256, num_class, kernel_size=1)
    self.ALN = ANet(375, 1500, 625 * 3)

    # STGCN branch
    self.st_gcn_networks_S = nn.ModuleList((
        st_gcn_block(in_channels, 64, kernel_size, isStgcn=True,
                     stride=1, residual=False, **kwargs0),
        st_gcn_block(64, 64, kernel_size, isStgcn=True, stride=1, **kwargs),
        st_gcn_block(64, 64, kernel_size, isStgcn=True, stride=1, **kwargs),
        st_gcn_block(64, 64, kernel_size, isStgcn=True, stride=1, **kwargs),
        st_gcn_block(64, 128, kernel_size, isStgcn=True, stride=2, **kwargs),
        st_gcn_block(128, 128, kernel_size, isStgcn=True, stride=1, **kwargs),
        st_gcn_block(128, 128, kernel_size, isStgcn=True, stride=1, **kwargs),
        st_gcn_block(128, 256, kernel_size, isStgcn=True, stride=2, **kwargs),
        st_gcn_block(256, 256, kernel_size, isStgcn=True, stride=1, **kwargs),
        st_gcn_block(256, 256, kernel_size, isStgcn=True, stride=1, **kwargs),
    ))
    self.edge_importance_S = nn.ParameterList([
        nn.Parameter(torch.ones(spatial_kernel_size, num_nodes, num_nodes))
        for _ in self.st_gcn_networks_S
    ])
    self.fcn_S = nn.Conv2d(256, num_class, kernel_size=1)