def __init__(self, **kwargs):
    """Build a PCN-style coarse-to-fine completion decoder.

    Keyword args may override any of the defaults set below
    (e.g. ``num_coarse``, ``grid_size``, ``grid_scale``) via the
    ``self.__dict__.update(kwargs)`` call.
    """
    super(get_model, self).__init__()
    # Folding grid: grid_size x grid_size patch per coarse point,
    # spanning [-grid_scale, grid_scale] in each of the two grid axes.
    self.grid_size = 4
    self.grid_scale = 0.5
    # Number of coarse points produced by folding1.
    self.num_coarse = 1024
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # NOTE(review): blindly injects arbitrary kwargs as attributes; any
    # derived value below must be computed AFTER this line so overrides
    # of num_coarse / grid_size / grid_scale take effect.
    self.__dict__.update(kwargs)  # to update args, num_coarse, grid_size, grid_scale

    # Dense output size: one grid patch per coarse point
    # (16384 with the defaults: 4**2 * 1024).
    self.num_fine = self.grid_size ** 2 * self.num_coarse  # 16384
    # (start, stop, steps) specs for the two folding-grid axes;
    # presumably expanded elsewhere (e.g. via torch.linspace) — TODO confirm.
    self.meshgrid = [[-self.grid_scale, self.grid_scale, self.grid_size],
                     [-self.grid_scale, self.grid_scale, self.grid_size]]

    # Global-feature encoder over 3-channel (xyz) input points.
    self.feat = PCNEncoder(global_feat=True, channel=3)

    # folding1: MLP mapping the 1024-d global feature to the coarse
    # point cloud (num_coarse * 3 coordinates).
    self.folding1 = nn.Sequential(
        nn.Linear(1024, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, self.num_coarse * 3))

    # folding2: shared point-wise MLP (1x1 convs) over concatenated
    # [global feature (1024) | 2-d grid coords | 3-d coarse point] = 1029
    # channels, producing 3-d fine-point offsets/coordinates.
    self.folding2 = nn.Sequential(
        nn.Conv1d(1024+2+3, 512, 1),
        nn.BatchNorm1d(512),
        nn.ReLU(),
        nn.Conv1d(512, 512, 1),
        nn.BatchNorm1d(512),
        nn.ReLU(),
        nn.Conv1d(512, 3, 1))
def __init__(self, num_class, num_channel=9, **kwargs):
    """Per-point segmentation head on top of a PCN encoder.

    Args:
        num_class: number of output classes per point.
        num_channel: channels per input point (default 9).
        **kwargs: accepted for interface compatibility; unused here.
    """
    super(get_model, self).__init__()
    self.num_class = num_class
    # With global_feat=False the encoder emits per-point features;
    # presumably local+global concatenation giving the 1280 channels
    # consumed below — verify against PCNEncoder.
    self.feat = PCNEncoder(global_feat=False, channel=num_channel)
    # Point-wise classifier: 1280 -> 512 -> 256 -> 128 -> num_class,
    # implemented as kernel-size-1 convolutions over the point axis.
    self.conv1 = nn.Conv1d(in_channels=1280, out_channels=512, kernel_size=1)
    self.conv2 = nn.Conv1d(in_channels=512, out_channels=256, kernel_size=1)
    self.conv3 = nn.Conv1d(in_channels=256, out_channels=128, kernel_size=1)
    self.conv4 = nn.Conv1d(in_channels=128, out_channels=self.num_class, kernel_size=1)
    # BatchNorms paired with conv1..conv3 (the final conv is left raw).
    self.bn1 = nn.BatchNorm1d(num_features=512)
    self.bn2 = nn.BatchNorm1d(num_features=256)
    self.bn3 = nn.BatchNorm1d(num_features=128)
def __init__(self, num_class=40, num_channel=3, **kwargs):
    """Shape-classification head on top of a PCN encoder.

    Args:
        num_class: number of shape categories (default 40, ModelNet40-style).
        num_channel: channels per input point (default 3, presumably xyz).
        **kwargs: accepted for interface compatibility; unused here.
    """
    super(get_model, self).__init__()
    # Global 1024-d feature vector per cloud.
    self.feat = PCNEncoder(global_feat=True, channel=num_channel)
    # Fully connected classifier: 1024 -> 512 -> 256 -> num_class.
    self.fc1 = nn.Linear(in_features=1024, out_features=512)
    self.fc2 = nn.Linear(in_features=512, out_features=256)
    self.fc3 = nn.Linear(in_features=256, out_features=num_class)
    # Regularization for the two hidden layers (dropout + batch norm).
    self.dp1 = nn.Dropout(0.3)
    self.bn1 = nn.BatchNorm1d(num_features=512)
    self.dp2 = nn.Dropout(0.3)
    self.bn2 = nn.BatchNorm1d(num_features=256)