Example #1
    def forward(self, a, b):
        position_loss = self.L1(a[:, 0:3, :], b[:, 0:3, :])  # computed but unused below
        normal_loss = torch.mean(
            1 - Ops.cosine_similarity(a[:, 3:6, :], b[:, 3:6, :]))
        self.nlogger.update(normal_loss)

        return normal_loss
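The Ops module is external to this excerpt; assuming Ops.cosine_similarity matches torch.nn.functional.cosine_similarity over the channel axis (an assumption), the normal term can be exercised standalone:

    import torch
    import torch.nn.functional as F

    a = torch.randn(8, 6, 1024)   # (batch, xyz + normal channels, points)
    b = torch.randn(8, 6, 1024)
    # Assumed equivalent of Ops.cosine_similarity along the channel axis:
    normal_loss = torch.mean(1 - F.cosine_similarity(a[:, 3:6, :], b[:, 3:6, :], dim=1))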
Example #2
    def __init__(self,
                 enc_size,
                 n_classes,
                 ae_model,
                 batch_size,
                 name="EncSVM"):
        super(EncodingSVM, self).__init__(name)

        self.batch_size = batch_size
        self.enc_size = enc_size
        self.n_classes = n_classes
        self.ae_model = ae_model

        self.upsample = Ops.NNUpsample1d()

        alpha = 32
        self.pools = []
        self.pools.append(nn.MaxPool1d(kernel_size=alpha, stride=alpha))
        self.pools.append(nn.MaxPool1d(kernel_size=alpha // 2,
                                       stride=alpha // 2))
        self.pools.append(nn.MaxPool1d(kernel_size=alpha // 4,
                                       stride=alpha // 4))
        #self.pools.append(nn.MaxPool1d(kernel_size=alpha/8, stride=alpha/8))

        self.fc = nn.Linear(self.enc_size, self.n_classes)
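Ops.NNUpsample1d is not shown in the excerpt; a plausible stand-in, assuming it performs 1-D nearest-neighbour upsampling (the factor 4 is a guess that matches the MaxPool1d(4, 4) it is paired with in the other examples; nn.Upsample(scale_factor=4, mode='nearest') would be the built-in equivalent):

    import torch
    import torch.nn as nn

    class NNUpsample1d(nn.Module):
        # Assumed behaviour: repeat each element `scale` times along the length axis.
        def __init__(self, scale=4):
            super(NNUpsample1d, self).__init__()
            self.scale = scale

        def forward(self, x):          # x: (batch, channels, length)
            return x.repeat_interleave(self.scale, dim=2)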
Example #3
    def forward(self, a, b):
        pcsize = a.size()[-1]

        if pcsize != self.n_samples:
            indices = np.arange(pcsize)
            np.random.shuffle(indices)
            indices = torch.from_numpy(indices[:self.n_samples]).cuda()
            a = a[:, :, indices]
            b = b[:, :, indices]

        a_points = torch.transpose(a, 1, 2)[:, :, 0:3]
        b_points = torch.transpose(b, 1, 2)[:, :, 0:3]
        pd = Ops.batch_pairwise_dist(a_points, b_points)
        d = pd

        a_normals = torch.transpose(a, 1, 2)[:, :, 3:6]
        b_normals = torch.transpose(b, 1, 2)[:, :, 3:6]
        mma = torch.stack([a_normals] * self.n_samples, dim=1)
        mmb = torch.stack([b_normals] * self.n_samples, dim=1).transpose(1, 2)
        d_norm = 1 - torch.sum(mma * mmb, 3).squeeze()
        d += self.normal_weight * d_norm

        normal_min_mean = torch.min(d_norm, dim=2)[0].mean()
        self.nlogger.update(normal_min_mean)

        chamfer_sym = torch.min(d, dim=2)[0].sum() + torch.min(d,
                                                               dim=1)[0].sum()
        chamfer_sym /= a.size()[0]

        return chamfer_sym
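Ops.batch_pairwise_dist is not included above; a minimal sketch under the usual contract, taking (B, N, 3) and (B, M, 3) point sets and returning a (B, N, M) matrix of squared distances:

    import torch

    def batch_pairwise_dist(x, y):
        # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 * <x_i, y_j>
        xx = (x * x).sum(dim=2, keepdim=True)    # (B, N, 1)
        yy = (y * y).sum(dim=2).unsqueeze(1)     # (B, 1, M)
        xy = torch.bmm(x, y.transpose(1, 2))     # (B, N, M)
        return xx + yy - 2 * xy

With this contract, torch.min(d, dim=2) picks each a-point's nearest b-point and torch.min(d, dim=1) the reverse, so their sum is the symmetric Chamfer distance computed above.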
Example #4
    def forward(self, x):

        mean = torch.mean(x, dim=0).pow(2)
        cov = Ops.cov(x)

        cov_loss = torch.mean(
            (Variable(torch.eye(cov.size()[0]).cuda()) - cov).pow(2))

        return torch.mean(mean) + cov_loss
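Ops.cov is likewise external; a plausible implementation, assuming x is a (n_samples, n_features) matrix and cov returns the (n_features, n_features) sample covariance:

    import torch

    def cov(x):
        # Sample covariance of the columns of x: (x - mean)^T (x - mean) / (n - 1)
        xm = x - x.mean(dim=0, keepdim=True)
        return xm.t().mm(xm) / (x.size(0) - 1)

Under that reading, the loss above pushes each feature mean toward zero and the covariance toward the identity, i.e. toward whitened, roughly standard-normal codes.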
Example #5
    def forward(self, x):
        real, fake = x
        _, real_features = real
        _, fake_features = fake

        real_features = torch.cat(real_features, dim=1)
        fake_features = torch.cat(fake_features, dim=1)

        real_mean = torch.mean(real_features, dim=0)
        fake_mean = torch.mean(fake_features, dim=0)

        real_cov = Ops.cov(real_features)
        fake_cov = Ops.cov(fake_features)

        mean_loss = torch.sum((real_mean - fake_mean).pow(2))
        cov_loss = torch.sum((real_cov - fake_cov).pow(2))

        return mean_loss + cov_loss
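A small usage sketch with hypothetical shapes: real and fake are (logits, feature-list) pairs from a discriminator, and the loss matches first and second moments of the concatenated features. criterion stands for an instance of the module above (its class name is not shown in the excerpt):

    import torch

    # Hypothetical discriminator outputs: logits are ignored, features matched.
    real = (None, [torch.randn(64, 128), torch.randn(64, 256)])
    fake = (None, [torch.randn(64, 128), torch.randn(64, 256)])
    loss = criterion((real, fake))   # mean_loss + cov_loss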
Example #6
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 kernel_size=2,
                 name="MRI2S",
                 pretrained=False,
                 arch='vgg'):
        super(MultiResImageToShape, self).__init__(name)

        self.size = size
        self.dim = dim
        self.kernel_size = kernel_size
        self.batch_size = batch_size
        if arch == 'vgg':
            self.encoder = torchvision.models.vgg11(pretrained=pretrained)
        elif arch == 'alexnet':
            self.encoder = torchvision.models.alexnet(pretrained=pretrained)
        self.encoder.classifier[6] = nn.Linear(4096, 16 * 1024)
        self.dec_modules = nn.ModuleList()
        self.base_size = 16

        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)

        custom_nfilters = [128, 128, 128, 256, 512, 512, 1024, 1024, 1024]
        custom_nfilters.reverse()
        custom_nfilters = np.array(custom_nfilters)

        current_size = self.base_size
        layer_num = 1
        padding = (self.kernel_size - 1) // 2
        while current_size < self.size:
            in_channels = custom_nfilters[layer_num - 1]
            print(in_channels)  # debug output
            out_channels = custom_nfilters[layer_num]
            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)
            current_size *= 2
            in_channels = out_channels
            layer_num += 1

            self.dec_modules.append(conv_dec)

        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(custom_nfilters[-1] * 3,
                               128,
                               1,
                               stride=1,
                               padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())
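The head replacement relies on torchvision's vgg11/alexnet classifiers ending in a 4096 -> 1000 Linear at index 6, which is swapped for a 4096 -> 16*1024 code layer; a quick self-contained check of that assumption:

    import torch
    import torchvision
    import torch.nn as nn

    enc = torchvision.models.vgg11(pretrained=False)
    enc.classifier[6] = nn.Linear(4096, 16 * 1024)
    code = enc(torch.randn(2, 3, 224, 224))   # (2, 16384), presumably reshaped
                                              # to (2, 1024, 16) for the decoder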
Example #7
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 kernel_size=2,
                 name="MRTDecoder"):
        super(MRTDecoder, self).__init__(name)

        self.size = size
        self.dim = dim
        self.kernel_size = kernel_size
        self.batch_size = batch_size

        self.z = nn.Parameter(torch.randn(16 * 1024))
        self.dec_modules = nn.ModuleList()
        self.base_size = 16

        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)

        custom_nfilters = [128, 128, 128, 256, 512, 512, 1024, 1024, 1024]
        custom_nfilters.reverse()
        custom_nfilters = np.array(custom_nfilters)
        custom_nfilters[1:] //= 2

        current_size = self.base_size
        layer_num = 1
        padding = (self.kernel_size - 1) // 2
        while current_size < self.size:
            in_channels = custom_nfilters[layer_num - 1]
            out_channels = custom_nfilters[layer_num]
            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)
            current_size *= 2
            in_channels = out_channels
            layer_num += 1

            self.dec_modules.append(conv_dec)

        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(custom_nfilters[-1] * 3,
                               128,
                               1,
                               stride=1,
                               padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())
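Here the decoder owns its input as a learned parameter. A hedged sketch of how forward presumably starts (the original forward is not shown): view the flat latent as (1, 1024, base_size=16) and broadcast it across the batch.

    # Hypothetical first step of MRTDecoder.forward:
    x = self.z.view(1, -1, self.base_size).expand(self.batch_size, -1, -1)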
Example #8
    def __init__(self, name, in_channels, out_channels, blocktype, activation):
        super(MultiResBlock1d, self).__init__()

        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.name = name

        self.conv0 = nn.Sequential()
        self.conv0.add_module(
            '{}_conv0'.format(self.name),
            blocktype(self.in_channels * 2,
                      self.out_channels,
                      kernel_size=2,
                      stride=2,
                      padding=0))
        self.conv0.add_module('{}_bn0'.format(self.name),
                              nn.BatchNorm1d(self.out_channels))
        self.conv0.add_module('{}_activation0'.format(self.name), activation)

        self.conv1 = nn.Sequential()
        self.conv1.add_module(
            '{}_conv1'.format(self.name),
            blocktype(self.in_channels * 3,
                      self.out_channels,
                      kernel_size=2,
                      stride=2,
                      padding=0))
        self.conv1.add_module('{}_bn1'.format(self.name),
                              nn.BatchNorm1d(self.out_channels))
        self.conv1.add_module('{}_activation1'.format(self.name), activation)

        self.conv2 = nn.Sequential()
        self.conv2.add_module(
            '{}_conv2'.format(self.name),
            blocktype(self.in_channels * 2,
                      self.out_channels,
                      kernel_size=2,
                      stride=2,
                      padding=0))
        self.conv2.add_module('{}_bn2'.format(self.name),
                              nn.BatchNorm1d(self.out_channels))
        self.conv2.add_module('{}_activation2'.format(self.name), activation)
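The 2C / 3C / 2C input widths of conv0/conv1/conv2 suggest each of three resolutions is concatenated with its pooled and/or upsampled neighbours before its stride-2 conv; one plausible forward under that assumption (illustration only, not part of the original excerpt):

    import torch

    # x0 coarsest ... x2 finest; pool/upsample are the factor-4 modules above.
    def forward(self, x0, x1, x2):
        y0 = self.conv0(torch.cat([x0, self.pool(x1)], dim=1))
        y1 = self.conv1(torch.cat([self.upsample(x0), x1, self.pool(x2)], dim=1))
        y2 = self.conv2(torch.cat([self.upsample(x1), x2], dim=1))
        return y0, y1, y2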
Example #9
    def chamfer_batch(self, a, b):
        pcsize = a.size()[-1]

        if pcsize != self.n_samples:
            indices = np.arange(pcsize).astype(int)
            np.random.shuffle(indices)
            indices = torch.from_numpy(indices[:self.n_samples]).cuda()
            a = a[:, :, indices]
            b = b[:, :, indices]

        a = torch.transpose(a, 1, 2).contiguous()
        b = torch.transpose(b, 1, 2).contiguous()

        if self.cuda_opt:
            d1, d2 = self.dist(a, b)
            out = torch.sum(d1) + torch.sum(d2)
            return out
        else:
            d = Ops.batch_pairwise_dist(a, b)
            return torch.min(d, dim=2)[0].sum() + torch.min(d, dim=1)[0].sum()
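Both branches compute the same symmetric sum: self.dist is presumably a CUDA Chamfer extension returning each point's squared distance to its nearest neighbour in the other cloud, while the fallback makes the minima explicit (batch_pairwise_dist as sketched under Example #3):

    # Fallback branch, spelled out. d has shape (B, N, M).
    d = Ops.batch_pairwise_dist(a, b)
    d1 = torch.min(d, dim=2)[0]    # per a-point distance to nearest b-point
    d2 = torch.min(d, dim=1)[0]    # per b-point distance to nearest a-point
    loss = d1.sum() + d2.sum()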
Example #10
    def evaluate(self):
        n_batches = 0
        all_correct_points = 0
        miou = 0

        total_error_class = np.zeros((13, 2))
        total_count_class = np.zeros(13)

        in_data = None
        out_data = None
        target = None

        n_iter = 0.0
        total_d = 0.0

        self.model.eval()

        for i, data in enumerate(self.val_loader, 0):
            if data[1].size()[0] != self.model.batch_size:
                continue

            in_data = Variable(data[0].cuda())
            target = Variable(data[1]).cuda()
            class_id = data[2][0]

            out_data = self.model(in_data)

            pd = Ops.batch_pairwise_dist(out_data.transpose(1, 2),
                                         target.transpose(1, 2))
            pd = torch.sqrt(pd)

            total_error_class[class_id, 0] += \
                torch.min(pd, dim=2)[0].data.cpu().numpy().mean()
            total_error_class[class_id, 1] += \
                torch.min(pd, dim=1)[0].data.cpu().numpy().mean()
            total_count_class[class_id] += 1.0

            scalar_group = {}

            # Iterate over classes
            for c in range(13):
                if total_count_class[c] > 0.0:
                    scalar_group['class{}_error_pred'.format(c)] = \
                        total_error_class[c, 0] / total_count_class[c]
                    scalar_group['class{}_error_gt'.format(c)] = \
                        total_error_class[c, 1] / total_count_class[c]

            np.save('total_error_class.npy', total_error_class)
            np.save('total_count_class.npy', total_count_class)
            self.writer.add_scalars('class_errors', scalar_group, i)

            n_iter += self.model.batch_size

            # Save some point clouds for visualization
            if i < 50:
                results_dir = os.path.join("eval", self.model.name)
                if not os.path.exists(results_dir):
                    os.makedirs(results_dir)
                write_image_pc(
                    os.path.join(results_dir,
                                 "out_{}".format(str(2 * i).zfill(4))),
                    (data[3][0, :, :, :], out_data[0, :, :].data.cpu()))
                save_torch_pc(
                    os.path.join(results_dir,
                                 "out_{}.obj".format(str(2 * i + 1).zfill(4))),
                    target)
                print("Test PC saved.")

        # Save final results
        np.save('total_error_class.npy', total_error_class)
        np.save('total_count_class.npy', total_count_class)
        print(total_d / n_iter)  # NOTE: total_d is never accumulated above
Example #11
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 enc_size=100,
                 kernel_size=2,
                 axis_file='rpt_axis.npy',
                 name="PointSeg"):
        super(UNetMRTDecoder, self).__init__(name)

        self.init_channels = 128
        self.size = size
        self.dim = dim
        self.batch_size = batch_size
        self.kernel_size = kernel_size
        self.enc_size = enc_size
        self.enc_modules = nn.ModuleList()
        self.dec_modules = nn.ModuleList()
        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.AvgPool1d(kernel_size=4, stride=4)
        self.z = Variable(torch.randn(self.size * 3).view(batch_size, 3,
                                                          -1)).cuda()

        custom_nfilters = [
            3, 4, 8, 16, 32, 64, 128, 128, 128, 128, 256, 256, 256
        ]
        custom_nfilters = np.array(custom_nfilters)

        current_size = self.size
        layer_num = 1
        padding = (self.kernel_size - 1) // 2
        n_channels = []
        while current_size > 64:
            in_channels = custom_nfilters[layer_num - 1]
            out_channels = custom_nfilters[layer_num]
            conv_enc = MultiResConv1d('down{}'.format(layer_num), in_channels,
                                      out_channels)
            current_size //= 2
            in_channels = out_channels
            n_channels.append(out_channels)
            if out_channels < 1024:
                out_channels *= 2
            layer_num += 1

            self.enc_modules.append(conv_enc)

        n_channels.reverse()
        current_size = 64
        layer_num = 1
        padding = (self.kernel_size - 1) // 2
        while current_size < self.size // 2:
            if layer_num == 1:
                in_channels = n_channels[layer_num - 1]
            else:
                in_channels = n_channels[layer_num - 1] * 2
            out_channels = n_channels[layer_num]

            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)

            current_size *= 2
            in_channels = out_channels
            layer_num += 1

            self.dec_modules.append(conv_dec)

        conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                           in_channels, 256)
        self.dec_modules.append(conv_dec)

        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(256 * 3, 128, 1, stride=1, padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())
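The decoder's doubled input widths (n_channels[layer_num - 1] * 2 for every up-layer after the first) imply U-Net style skip connections; a hypothetical forward wiring consistent with those shapes (the original forward is not part of the excerpt):

    import torch

    # enc_feats: outputs of self.enc_modules in computation order, coarsest last.
    x = enc_feats[-1]
    for i, dec in enumerate(self.dec_modules):
        if i > 0:
            x = torch.cat([x, enc_feats[-(i + 1)]], dim=1)   # mirrored skip
        x = dec(x)
    out = self.final_conv(x)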
Example #12
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 enc_size=100,
                 kernel_size=2,
                 reg_fn=NormalReg(),
                 noise=0,
                 name="MLVAE"):
        super(MultiResVAE, self).__init__(name)

        self.reg_fn = reg_fn

        self.size = size
        self.dim = dim
        self.enc_size = enc_size
        self.batch_size = batch_size
        self.kernel_size = kernel_size
        self.enc_modules = nn.ModuleList()
        self.dec_modules = nn.ModuleList()
        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)
        self.noise_factor = noise

        self.enc_noise = torch.FloatTensor(self.batch_size, self.enc_size)

        custom_nfilters = [3, 32, 64, 128, 256, 512, 512, 1024, 1024, 1024]
        custom_nfilters = np.array(custom_nfilters)
        custom_nfilters[1:] //= 2
        self.last_size = 16

        self.noise = torch.FloatTensor(self.batch_size, self.enc_size)

        current_size = self.size
        layer_num = 1
        padding = (self.kernel_size - 1) // 2
        n_channels = []
        n_channels.append(custom_nfilters[layer_num - 1])
        while current_size > self.last_size:
            in_channels = custom_nfilters[layer_num - 1]
            out_channels = custom_nfilters[layer_num]
            conv_enc = MultiResConv1d('down{}'.format(layer_num), in_channels,
                                      out_channels)
            current_size //= 2
            in_channels = out_channels
            n_channels.append(out_channels)
            layer_num += 1

            self.enc_modules.append(conv_enc)

        self.enc_fc = nn.Linear(3 * self.last_size * in_channels,
                                self.enc_size)
        #self.enc_fc_mean = nn.Linear(3*self.last_size*in_channels, self.enc_size)
        #self.enc_fc_var = nn.Linear(3*self.last_size*in_channels, self.enc_size)
        self.dec_fc = nn.Linear(self.enc_size, self.last_size * n_channels[-1])

        self.final_feature = 128
        n_channels.reverse()
        n_channels[-1] = self.final_feature
        current_size = self.last_size
        layer_num = 1
        padding = (self.kernel_size - 1) // 2
        while current_size < self.size:
            in_channels = n_channels[layer_num - 1]
            out_channels = n_channels[layer_num]
            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)
            current_size *= 2
            in_channels = out_channels
            layer_num += 1

            self.dec_modules.append(conv_dec)

        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(self.final_feature * 3,
                               128,
                               1,
                               stride=1,
                               padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())
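A hedged sketch of the bottleneck this constructor sets up (the actual forward is not shown): the three encoder resolutions are flattened into enc_fc, the code is jittered with the pre-allocated noise buffer scaled by noise_factor, and dec_fc's output is reshaped back to (batch, channels, last_size):

    # feats: encoder output, flattened to (batch, 3 * last_size * channels).
    z = self.enc_fc(feats.view(self.batch_size, -1))
    z = z + self.noise_factor * Variable(self.noise.normal_())  # denoising-style jitter
    x = self.dec_fc(z).view(self.batch_size, -1, self.last_size)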