Example #1
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)
        fusion_type = kwargs.get("fusion", "concatenate")
        patch_feature_input_dim = kwargs.get("patch_feature_input_dim", 512)
        patch_feature_hidden_dim = kwargs.get("patch_feature_hidden_dim", patch_feature_input_dim)
        patch_feature_output_dim = kwargs.get("patch_feature_output_dim", patch_feature_input_dim)

        if fusion_type == "concatenate":
            num_channels = data_shape["skeleton"][-1] + patch_feature_output_dim
        else:
            num_channels = data_shape["skeleton"][-1]

        # Identity by default; when input and output patch feature dims differ, project through a two-layer MLP.
        self.patch_feature_dim_reducer = lambda x: x
        if patch_feature_input_dim != patch_feature_output_dim:
            self.patch_feature_dim_reducer = nn.Sequential(
                nn.Linear(patch_feature_input_dim, patch_feature_hidden_dim),
                nn.Linear(patch_feature_hidden_dim, patch_feature_output_dim)
            )

        agcn_input_shape = (data_shape["skeleton"][0], data_shape["skeleton"][1], graph.num_vertices,
                            num_channels)
        self.agcn = agcn.Model(agcn_input_shape, num_classes, graph, num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))
        self.fusion = get_fusion(fusion_type, concatenate_dim=-1)
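
The constructor above only wires the modules together. Below is a minimal sketch of the intended data flow; the tensor layout, the concrete sizes, and the assumption that the "concatenate" fusion amounts to a channel-wise torch.cat are inferred from the constructor, not taken from the repository.

import torch

# Hypothetical illustration of the data flow wired up above; all shapes are made up.
# Assumed layout: (bodies, frames, vertices, channels).
skeleton = torch.randn(2, 30, 25, 3)          # 25 joints with 3 coordinate channels
patch_features = torch.randn(2, 30, 25, 512)  # one 512-d RGB patch feature per joint

dim_reducer = lambda x: x                     # identity, as when input/output dims match
fused = torch.cat([skeleton, dim_reducer(patch_features)], dim=-1)
print(fused.shape)                            # torch.Size([2, 30, 25, 515])
# 515 = skeleton channels (3) + patch_feature_output_dim (512), matching num_channels above.
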
Example #2
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)
        self.agcn = agcn.Model(data_shape["rgb"],
                               num_classes,
                               graph,
                               num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))
Example #3
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)
        shape = list(data_shape["skeleton"])
        shape[-1] += data_shape["inertial"][-1]  # add IMU channels to the skeleton channel dimension
        self.fusion = get_fusion("concatenate", concatenate_dim=-1)
        self.agcn = agcn.Model(tuple(shape), num_classes, graph, num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))
Example #4
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)

        edges = kwargs["rgb_patch_groups_edges"]
        edges = [tuple(map(int, edge.split(", "))) for edge in edges]
        graph = Graph(edges)

        self.agcn = agcn.Model(data_shape["rgb"],
                               num_classes,
                               graph,
                               num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))
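
Since each edge string is split on ", " and cast to int, a configuration along the following lines would parse correctly; the concrete edge values are made up for illustration.

kwargs = {"rgb_patch_groups_edges": ["0, 1", "1, 2", "2, 3"]}  # hypothetical edge list
edges = [tuple(map(int, edge.split(", "))) for edge in kwargs["rgb_patch_groups_edges"]]
print(edges)  # [(0, 1), (1, 2), (2, 3)]
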
Example #5
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)

        self.rgb_encoder = RgbCnnEncoder(rgb_num_vertices=graph.num_vertices,
                                         **kwargs)

        agcn_input_shape = (self.rgb_encoder.num_bodies, data_shape["rgb"][0],
                            self.rgb_encoder.num_vertices,
                            self.rgb_encoder.num_encoded_channels)
        self.agcn = agcn.Model(agcn_input_shape,
                               num_classes,
                               graph,
                               num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))
Example #6
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)

        num_additional_nodes = kwargs.pop("num_additional_nodes", 3)
        self.rgb_encoder = RgbR2P1DEncoder(num_encoded_channels=data_shape["skeleton"][-1],
                                           num_additional_nodes=num_additional_nodes * data_shape["skeleton"][0],
                                           **kwargs)

        # Connect each additional RGB-derived node to the skeleton's center joint.
        graph = graph.with_new_edges([(graph.num_vertices + i, graph.center_joint)
                                      for i in range(num_additional_nodes)])

        agcn_input_shape = list(data_shape["skeleton"])
        agcn_input_shape[2] = graph.num_vertices
        self.agcn = agcn.Model(tuple(agcn_input_shape), num_classes, graph, num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))
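
The vertex bookkeeping can be checked in isolation; the joint count and center joint index below are hypothetical, only the edge construction mirrors the constructor above.

num_vertices = 25          # hypothetical skeleton joint count
center_joint = 1           # hypothetical center joint index
num_additional_nodes = 3

new_edges = [(num_vertices + i, center_joint) for i in range(num_additional_nodes)]
print(new_edges)           # [(25, 1), (26, 1), (27, 1)]
# graph.with_new_edges(new_edges) is then expected to report 28 vertices,
# which is why agcn_input_shape[2] is overwritten with graph.num_vertices.
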
Example #7
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)
        fusion_type = kwargs.get("fusion", "concatenate")

        self.rgb_encoder = RgbCnnEncoder(rgb_num_vertices=graph.num_vertices, **kwargs)

        if fusion_type == "concatenate":
            num_channels = data_shape["skeleton"][-1] + self.rgb_encoder.num_encoded_channels
        else:
            num_channels = data_shape["skeleton"][-1]

        agcn_input_shape = (self.rgb_encoder.num_bodies, data_shape["rgb"][0], graph.num_vertices,
                            num_channels)
        self.agcn = agcn.Model(agcn_input_shape, num_classes, graph, num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))
        self.fusion = get_fusion(fusion_type, concatenate_dim=-1)
Example #8
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)
        dropout = kwargs.get("dropout", 0.)
        fusion_type = kwargs.get("fusion", "concatenate")
        self.r2p1d = rgb_models.RgbR2p1DModel(data_shape["rgb"], num_classes, graph, without_fc=True, model_depth=18,
                                              **kwargs)
        self.agcn = agcn.Model(data_shape["skeleton"], num_classes, graph, num_layers=num_layers, without_fc=True,
                               dropout=dropout)
        self.fusion = get_fusion(fusion_type, concatenate_dim=-1)
        # Project R(2+1)D features to the AGCN channel width so the two streams can be fused.
        self.fc1 = nn.Linear(self.r2p1d.r2p1d.out_dim, self.agcn.out_channels)

        if fusion_type == "concatenate":
            out_dim = self.agcn.out_channels * 2
        else:
            out_dim = self.agcn.out_channels

        self.fc2 = nn.Linear(out_dim, num_classes)
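
A minimal sketch of the late-fusion head built here, assuming globally pooled feature vectors from both streams; the dimensions and the forward order are assumptions, only the projection/concatenation/classification structure mirrors the constructor.

import torch
import torch.nn as nn

r2p1d_dim, agcn_dim, num_classes = 512, 256, 27  # hypothetical sizes
fc1 = nn.Linear(r2p1d_dim, agcn_dim)             # align R(2+1)D features with the AGCN width
fc2 = nn.Linear(agcn_dim * 2, num_classes)       # "concatenate" fusion doubles the width

rgb_features = torch.randn(4, r2p1d_dim)         # assumed pooled R(2+1)D output
skeleton_features = torch.randn(4, agcn_dim)     # assumed pooled AGCN output
logits = fc2(torch.cat([fc1(rgb_features), skeleton_features], dim=-1))
print(logits.shape)                              # torch.Size([4, 27])
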
Example #9
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)
        dropout = kwargs.get("dropout", 0.)
        fusion_type = kwargs.get("fusion", "concatenate")
        if kwargs.pop("skeleton_imu_spatial_fusion", False):
            graph = get_skeleton_imu_fusion_graph(graph, **kwargs)
        self.imu_gcn = imu_models.ImuGCN(data_shape, num_classes, inter_signal_back_connections=True,
                                         include_additional_top_layer=True, without_fc=True, **kwargs)
        self.agcn = agcn.Model(data_shape["skeleton"], num_classes, graph, num_layers=num_layers, without_fc=True,
                               dropout=dropout)
        self.fusion = get_fusion(fusion_type, concatenate_dim=-1)

        if fusion_type == "concatenate":
            out_dim = self.agcn.out_channels * 2
        else:
            out_dim = self.agcn.out_channels

        self.fc = nn.Linear(out_dim, num_classes)
Example #10
    def __init__(self, data_shape, num_classes: int, graph, **kwargs):
        super().__init__()
        num_layers = kwargs.get("num_layers", 10)
        skeleton_imu_graph = get_skeleton_imu_fusion_graph(graph, **kwargs)
        self.agcn = agcn.Model(data_shape["skeleton"], num_classes, skeleton_imu_graph, num_layers=num_layers,
                               without_fc=kwargs.get("without_fc", False))