def __init__(self,
                 mesh,
                 batch_size=None,
                 bottleneck_dim=1,
                 pretrained='GraphConv',
                 weights_path=None,
                 freeze_nlayers=5):
        super(SensorMeshToDryspotModel, self).__init__()
        self.mesh = mesh
        self.batch_size = batch_size

        self.bottleneck_dim = bottleneck_dim

        self.gc1 = GraphConv(1, 16)
        self.gc2 = GraphConv(16, 32)
        self.gc3 = GraphConv(32, 64)
        self.gc4 = GraphConv(64, 32)
        self.gc5 = GraphConv(32, bottleneck_dim)

        self.conv1 = nn.Conv2d(bottleneck_dim, 16, 5, stride=1)
        self.conv2 = nn.Conv2d(16, 32, 5, stride=1)
        self.conv3 = nn.Conv2d(32, 64, 3, stride=1)
        self.conv4 = nn.Conv2d(64, 128, 3)
        self.conv5 = nn.Conv2d(128, 256, 3, stride=2)

        self.pool = nn.MaxPool2d(2, 2)

        # self.avg_pool = nn.AdaptiveAvgPool1d(168*168*bottleneck_dim)
        self.adaptive_maxpool = nn.AdaptiveMaxPool1d(100 * 100 *
                                                     bottleneck_dim)
        self.linear1 = nn.Linear(1024, 256)
        self.linear2 = nn.Linear(256, 64)
        self.linear3 = nn.Linear(64, 1)

        if pretrained == 'GraphConv' and weights_path is not None:
            logger = logging.getLogger(__name__)
            weights = load_GraphConv_layers_from_path(
                path=weights_path,
                layer_names={'gc1', 'gc2', 'gc3', 'gc4', 'gc5'})
            incomp = self.load_state_dict(weights, strict=False)
            logger.debug(f'All layers: {self.state_dict().keys()}')
            logger.debug(f'Loaded weights; incompatible keys: {incomp}')
            print("Loaded layers")

        # Freeze the first `freeze_nlayers` child modules (by default the five GraphConv layers).
        logger = logging.getLogger(__name__)
        for i, c in enumerate(self.children()):
            logger.info(f'Freezing: {c}')
            for param in c.parameters():
                param.requires_grad = False
            if i == freeze_nlayers - 1:
                break

        self.count = 0
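
# Sketch of the tensor plumbing the constructor above implies (an assumption,
# since forward() is not part of this listing): the flattened per-vertex
# GraphConv output is pooled to a fixed length with AdaptiveMaxPool1d and
# reshaped into a 100x100 "image" for the Conv2d stack. All names below are
# illustrative only.
import torch
import torch.nn as nn

batch_size, num_vertices, bottleneck_dim = 2, 12345, 1
vertex_features = torch.randn(batch_size, num_vertices, bottleneck_dim)

x = vertex_features.reshape(batch_size, 1, -1)              # (B, 1, V * C)
x = nn.AdaptiveMaxPool1d(100 * 100 * bottleneck_dim)(x)     # (B, 1, 100 * 100 * C)
x = x.view(batch_size, bottleneck_dim, 100, 100)            # input shape expected by conv1
print(x.shape)                                               # torch.Size([2, 1, 100, 100])
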
    def __init__(self, mesh, batch_size=None):
        super(SensorMeshToFlowFrontModel, self).__init__()

        self.batch_size = batch_size

        self.mesh = mesh

        self.gc1 = GraphConv(1, 16).cuda()
        self.gc2 = GraphConv(16, 32).cuda()
        self.gc3 = GraphConv(32, 64).cuda()
        self.gc4 = GraphConv(64, 32).cuda()
        self.gc5 = GraphConv(32, 1).cuda()
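
# The constructor above only defines the GraphConv stack; the sketch below is an
# assumed usage pattern, not the project's forward(). It presumes GraphConv is
# pytorch3d.ops.GraphConv (the (verts, edges) call signature used by the later
# examples in this listing) and uses a toy mesh for illustration.
import torch
import torch.nn.functional as F
from pytorch3d.ops import GraphConv

verts_feats = torch.randn(4, 1)                      # 4 vertices, 1 sensor value each
edges = torch.tensor([[0, 1], [1, 2], [2, 3]])       # edge list, shape (E, 2)

gc1, gc2 = GraphConv(1, 16), GraphConv(16, 32)
x = F.relu(gc1(verts_feats, edges))
x = gc2(x, edges)
print(x.shape)                                       # torch.Size([4, 32])
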
    def __init__(self,
                 mesh,
                 batch_size=None,
                 bottleneck_dim=1,
                 pretrained='GraphConv',
                 weights_path=None,
                 freeze_nlayers=5):
        super(SensorMeshToDryspotResnet, self).__init__()
        self.mesh = mesh
        self.batch_size = batch_size

        self.bottleneck_dim = bottleneck_dim

        self.gc1 = GraphConv(1, 16)
        self.gc2 = GraphConv(16, 32)
        self.gc3 = GraphConv(32, 64)
        self.gc4 = GraphConv(64, 32)
        self.gc5 = GraphConv(32, bottleneck_dim)

        self.classifier = m.resnet18(pretrained=True)
        num_ftrs = self.classifier.fc.in_features
        self.classifier.fc = torch.nn.Linear(num_ftrs, 1)

        self.upsample = nn.Upsample(size=(224, 224))

        self.adaptive_maxpool = nn.AdaptiveMaxPool1d(168 * 168 *
                                                     bottleneck_dim)

        if pretrained == 'GraphConv' and weights_path is not None:
            logger = logging.getLogger(__name__)
            weights = load_GraphConv_layers_from_path(
                path=weights_path,
                layer_names={'gc1', 'gc2', 'gc3', 'gc4', 'gc5'})
            incomp = self.load_state_dict(weights, strict=False)
            logger.debug(f'All layers: {self.state_dict().keys()}')
            logger.debug(f'Loaded weights; incompatible keys: {incomp}')
            print("Loaded layers")

            # Freeze the first `freeze_nlayers` child modules (by default the five GraphConv layers).
            for i, c in enumerate(self.children()):
                logger.info(f'Freezing: {c}')
                for param in c.parameters():
                    param.requires_grad = False
                if i == freeze_nlayers - 1:
                    break

        self.count = 0
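
# Sketch of how the ResNet head above could be driven (an assumption; the real
# forward() is not shown). The GraphConv output, pooled to 168x168, is upsampled
# to 224x224 and tiled to the 3 input channels torchvision's resnet18 expects.
# Running this downloads the pretrained ImageNet weights.
import torch
import torch.nn as nn
import torchvision.models as m

classifier = m.resnet18(pretrained=True)
classifier.fc = nn.Linear(classifier.fc.in_features, 1)    # single dryspot logit
upsample = nn.Upsample(size=(224, 224))

x = torch.randn(2, 1, 168, 168)      # pooled per-vertex features as a 1-channel image
x = upsample(x)                      # (2, 1, 224, 224)
x = x.repeat(1, 3, 1, 1)             # assumption: tile the channel to match ResNet input
print(classifier(x).shape)           # torch.Size([2, 1])
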
Example #4
    def __init__(self, img_feat_dim, vert_feat_dim, hidden_dim, stage_depth, gconv_init="normal"):
        """
        Args:
          img_feat_dim: Dimension of features we will get from vert_align
          vert_feat_dim: Dimension of vert_feats we will receive from the
                        previous stage; can be 0
          hidden_dim: Output dimension for graph-conv layers
          stage_depth: Number of graph-conv layers to use
          gconv_init: How to initialize graph-conv layers
        """
        super(MeshRefinementStage, self).__init__()

        # fc layer to reduce feature dimension
        self.bottleneck = nn.Linear(img_feat_dim, hidden_dim)

        # deform layer
        self.verts_offset = nn.Linear(hidden_dim + 3, 3)

        # graph convs
        self.gconvs = nn.ModuleList()
        for i in range(stage_depth):
            if i == 0:
                input_dim = hidden_dim + vert_feat_dim + 3
            else:
                input_dim = hidden_dim + 3
            gconv = GraphConv(input_dim, hidden_dim, init=gconv_init, directed=False)
            self.gconvs.append(gconv)

        # initialization
        nn.init.normal_(self.bottleneck.weight, mean=0.0, std=0.01)
        nn.init.constant_(self.bottleneck.bias, 0)

        nn.init.zeros_(self.verts_offset.weight)
        nn.init.constant_(self.verts_offset.bias, 0)
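
# Usage sketch (assumes the MeshRefinementStage class above is in scope and that
# GraphConv is pytorch3d.ops.GraphConv). With hidden_dim=128 and vert_feat_dim=128,
# the first graph conv consumes 128 + 128 + 3 = 259 features (bottlenecked image
# features + previous vertex features + xyz coordinates); the remaining convs
# consume 128 + 3 = 131.
stage = MeshRefinementStage(img_feat_dim=256, vert_feat_dim=128,
                            hidden_dim=128, stage_depth=3)
print(stage.gconvs)    # ModuleList: GraphConv(259 -> 128), then two GraphConv(131 -> 128)
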
Example #5
    def __init__(self, image_size=(512, 512), input_dim=3):
        super(MeshRenderer,self).__init__()
        self.image_height = image_size[0]
        self.image_width = image_size[1]

        self.graph_layer1 = GraphConv(input_dim=input_dim, output_dim=256)
        self.graph_layer2 = GraphConv(input_dim=256, output_dim=512)
        self.graph_layer3 = GraphConv(input_dim=128, output_dim=128)
        self.graph_layer4 = GraphConv(input_dim=128, output_dim=3 * 128 * 128)

        self.linear0 = nn.Linear(input_dim, 16)
        self.linear1 = nn.Linear(16, 32)

        self.linear2 = nn.Linear(512, 1024)
        self.linear3 = nn.Linear(1024, 3 * 128 * 128)
        self.upsample = nn.Upsample(scale_factor=4)
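
# Inference from the constructor above (the forward() is not shown): linear3
# emits 3 * 128 * 128 values, which can be reshaped into a (B, 3, 128, 128) image
# and brought to the requested 512x512 image_size by the scale-factor-4 upsample.
import torch
import torch.nn as nn

flat = torch.randn(2, 3 * 128 * 128)        # hypothetical linear3 output
img = flat.view(2, 3, 128, 128)
img = nn.Upsample(scale_factor=4)(img)      # (2, 3, 512, 512) == image_size
print(img.shape)
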
Example #6
    def __init__(self, cfg):
        super(GraphConvClf, self).__init__()
        input_dim = cfg.D.INPUT_MESH_FEATS
        hidden_dims = cfg.D.HIDDEN_DIMS
        classes = cfg.D.CLASSES
        gconv_init = cfg.D.CONV_INIT

        # Graph Convolution Network
        self.gconvs = nn.ModuleList()
        dims = [input_dim] + hidden_dims
        for i in range(len(dims) - 1):
            self.gconvs.append(
                GraphConv(dims[i],
                          dims[i + 1],
                          init=gconv_init,
                          directed=False))

        self.fc1 = nn.Linear(dims[-1], dims[-1])
        self.fc2 = nn.Linear(dims[-1], dims[-1])
        self.fc3 = nn.Linear(dims[-1], classes)
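
# Usage sketch with a hypothetical config object; the real cfg comes from the
# project's config system, and only the fields read by __init__ are filled in.
# Assumes GraphConvClf (above) and pytorch3d are importable.
from types import SimpleNamespace

cfg = SimpleNamespace(D=SimpleNamespace(
    INPUT_MESH_FEATS=3,
    HIDDEN_DIMS=[128, 128, 256],
    CLASSES=10,
    CONV_INIT="normal",
))
clf = GraphConvClf(cfg)
print(clf.gconvs)      # GraphConv 3 -> 128 -> 128 -> 256, followed by the fc head
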
    def __init__(self,
                 img_feat_dim,
                 vert_feat_dim,
                 hidden_dim,
                 stage_depth,
                 gconv_init="normal"):
        """
        Args:
          img_feat_dim (int): Dimension of features we will get from vert_align
          vert_feat_dim (int): Dimension of vert_feats we will receive from the
                               previous stage; can be 0
          hidden_dim (int): Output dimension for graph-conv layers
          stage_depth (int): Number of graph-conv layers to use
          gconv_init (str): Specifies weight initialization for graph-conv layers
        """
        super(MeshRefinementStage, self).__init__()

        self.bottleneck = nn.Linear(img_feat_dim, hidden_dim)

        self.vert_offset = nn.Linear(hidden_dim + 3, 3)

        self.gconvs = nn.ModuleList()
        for i in range(stage_depth):
            if i == 0:
                input_dim = hidden_dim + vert_feat_dim + 3
            else:
                input_dim = hidden_dim + 3
            gconv = GraphConv(input_dim,
                              hidden_dim,
                              init=gconv_init,
                              directed=False)
            self.gconvs.append(gconv)

        # initialization for bottleneck and vert_offset
        nn.init.normal_(self.bottleneck.weight, mean=0.0, std=0.01)
        nn.init.constant_(self.bottleneck.bias, 0)

        nn.init.zeros_(self.vert_offset.weight)
        nn.init.constant_(self.vert_offset.bias, 0)
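
# Design note (sketch): zero-initialising vert_offset means each refinement stage
# predicts a zero vertex offset before training, so optimisation starts from the
# incoming mesh rather than a random deformation. A quick standalone check of
# that property:
import torch
import torch.nn as nn

offset_head = nn.Linear(128 + 3, 3)          # same shape as vert_offset for hidden_dim=128
nn.init.zeros_(offset_head.weight)
nn.init.constant_(offset_head.bias, 0)
print(offset_head(torch.randn(10, 131)).abs().max())    # tensor(0., grad_fn=...)
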