Example #1
0
def train(model,n_epoch,optimizer,scheduler,device,data_dir,
          testPart,save_dir,type,is_hybird=False, hybird_dir=None):
    """Train the point-cloud autoencoder, checkpoint per epoch, then run a test pass.

    Args:
        model: autoencoder returning ``(encoder, decoder)`` from ``model(x)``.
        n_epoch: number of training epochs.
        optimizer: optimizer stepping ``model``'s parameters.
        scheduler: LR scheduler (currently unused — its ``step()`` is commented out).
        device: torch device passed through to the dataset.
        data_dir: directory of .ply files for the primary dataset.
        testPart: train/test split fraction forwarded to ``NNdataProcess.get_ae_data``.
        save_dir: directory prefix for per-epoch ``state_dict`` checkpoints.
        type: tag embedded in the checkpoint filename (shadows the builtin; kept
            for interface compatibility).
        is_hybird: unused flag kept for interface compatibility.
        hybird_dir: optional second data directory mixed into train/test sets.
    """
    bceFCloss = bce_fc_loss.BCEFocalLoss(gamma=2, alpha=0.9)

    x_train, x_test = NNdataProcess.get_ae_data(testPart=testPart,data_dir=data_dir,is_return_dir=True)
    print(len(x_train),len(x_test))

    # Optionally mix a second ("hybrid") dataset into both splits and reshuffle.
    if hybird_dir:
        x2_train,x2_test = NNdataProcess.get_ae_data(testPart=testPart,data_dir=hybird_dir,is_return_dir=True)
        x_train, x_test = x_train+x2_train, x_test+x2_test
        random.shuffle(x_train)
        random.shuffle(x_test)

    print('start train AE')
    model.train()

    # BUGFIX: the loop previously started at epoch 3 (apparently a leftover
    # from a resumed run), silently skipping the first two epochs.
    for epoch in range(1, n_epoch+1):
        random.shuffle(x_train)

        # Stream the training files 10 at a time to bound memory use.
        for i in range(0,len(x_train),10):

            trainset = dataset.My_PLy_AE_Dataset(ply_dir = x_train[i:i+10], device=device,Normalize_= [],is_full_size=False,scale=0.3)
            trainloader = torch.utils.data.DataLoader(trainset, batch_size=2, shuffle=False, num_workers=0)

            for batch_idx, (idex,x) in enumerate(trainloader):

                encoder,decoder = model(x)
                # Reconstruction loss: focal BCE between input and decoder output.
                loss = bceFCloss(x, decoder)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                # The original `batch_idx % 1 == 0` guard was always true;
                # log every batch unconditionally.
                print('epoch {}: {}/{},loss: {}'.format(epoch, i, len(x_train),loss))

        #scheduler.step()
        torch.save(model.state_dict(), save_dir+'AutoEncoder-epode-' + str(epoch)+ '-'+ type + '.pkl')

    # model test
    model.eval()
    with torch.no_grad():
        # BUGFIX: range(len(x_test)-1) skipped the last test sample.
        for i in range(len(x_test)):
            testset = dataset.My_PLy_AE_Dataset(ply_dir = x_test[i:i+1], device=device,Normalize_= [])
            testloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=True, num_workers=0)
            for batch_idx, (idex,x) in enumerate(testloader):
                encoder,decoder = model(x)
                loss = bceFCloss(x, decoder)
                # NOTE(review): `epoch` here is the last training epoch; it is
                # unbound if n_epoch < 1 — confirm callers always train at
                # least one epoch.
                print('epoch {}: {}/{},Test-loss: {}'.format(epoch, 
                                    batch_idx*len(x_test),len(testloader.dataset),loss))
Example #2
0
def get_hidden_tensor(model, device, data_dir, save_dir):
    """Encode each raw point cloud and its trunk point cloud, then pickle the latents.

    For every entry in the raw list, runs the AE on the raw .ply files and on
    the matching trunk .ply, collects the encoder outputs, and (if ``save_dir``
    is truthy) dumps ``[[feature_vector], trunk_vector]`` to
    ``save_dir + 'AE-tensor(RawPC-TrunkPC)_<i>.obj'``.

    Args:
        model: autoencoder returning ``(encoder, decoder)`` from ``model(x)``.
        device: torch device forwarded to the datasets.
        data_dir: source directory forwarded to ``NNdataProcess.get_data``.
        save_dir: output directory prefix; falsy value skips writing.
    """
    # NOTE(review): raw_pc_list[i] is passed directly as ply_dir while
    # trunk_pc[i] is wrapped in a list — presumably each raw entry is already
    # a list of paths; confirm against NNdataProcess.get_data.
    raw_pc_list, trunk_pc, _, _ = NNdataProcess.get_data(testPart=0,
                                                         data_dir=data_dir,
                                                         is_return_dir=True)

    print('start eval AE')

    model.eval()
    with torch.no_grad():
        MSE = nn.MSELoss()
        for i in range(len(raw_pc_list)):
            feature_vector = []
            trunk_vector = []

            pcSet = dataset.My_PLy_AE_Dataset(ply_dir=raw_pc_list[i],
                                              device=device,
                                              Normalize_=[])
            pcloader = torch.utils.data.DataLoader(pcSet,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=0)
            for batch_idx, (idex, x) in enumerate(pcloader):
                encoder, decoder = model(x)
                # NOTE(review): .numpy() requires a CPU tensor — assumes
                # `device` is CPU here; confirm before running on GPU.
                feature_vector.append(encoder.numpy().tolist()[0])
                # MSE reconstruction error, logged for inspection only.
                loss = MSE(x, decoder)
                print('Test-loss: {}'.format(loss))

            trunkSet = dataset.My_PLy_AE_Dataset(ply_dir=[trunk_pc[i]],
                                                 device=device,
                                                 Normalize_=[])
            trunkloader = torch.utils.data.DataLoader(trunkSet,
                                                      batch_size=1,
                                                      shuffle=False,
                                                      num_workers=0)
            for batch_idx, (idex, x) in enumerate(trunkloader):
                encoder, decoder = model(x)
                trunk_vector.append(encoder.numpy().tolist()[0])
                loss = MSE(x, decoder)
                print('Test-loss: {}'.format(loss))

            if save_dir:
                # `with` guarantees the file is closed even if pickling fails
                # (the original open/close pair leaked on error).
                with open(
                        save_dir + 'AE-tensor(RawPC-' + 'TrunkPC)_' + str(i) +
                        ".obj", 'wb') as file_dump:
                    pickle.dump([[feature_vector], trunk_vector], file_dump)
Example #3
0
def test_sacling_ply(input_ply_dir,save_ply_dir):
    """Load one .ply through the AE dataset at scale 0.3 and write it back out as .ply."""
    cpu_device = torch.device('cpu')
    scaled_set = dataset.My_PLy_AE_Dataset(ply_dir=[input_ply_dir], device=cpu_device,
                                           Normalize_=None, scale=0.3)
    loader = torch.utils.data.DataLoader(scaled_set, batch_size=1,
                                         shuffle=True, num_workers=0)
    for sample_idx, voxels in loader:
        # Drop the batch dimension before exporting.
        squeezed = torch.squeeze(voxels)
        print(squeezed.size())
        tensor_to_ply(squeezed, 0.3, save_ply_dir, scale_factor=None)
Example #4
0
def creat_ply_by_AE(model,device,threshold,input_ply_dir,save_ply_dir):
    """Reconstruct a single .ply through the autoencoder and export the decoded cloud.

    The decoder output is squeezed to drop the batch dimension and written via
    ``tensor_to_ply`` using ``threshold`` as the occupancy cutoff.
    """
    model.eval()
    mse_loss = nn.MSELoss()
    with torch.no_grad():
        recon_set = dataset.My_PLy_AE_Dataset(ply_dir=[input_ply_dir], device=device,
                                              Normalize_=None,)
        loader = torch.utils.data.DataLoader(recon_set, batch_size=1,
                                             shuffle=True, num_workers=0)
        for sample_idx, voxels in loader:
            latent, reconstruction = model(voxels)
            # Log the reconstruction error for inspection.
            print('Test-loss: {}'.format(mse_loss(voxels, reconstruction)))
            print(reconstruction.size())
            AE_decoder_out = torch.squeeze(reconstruction)
            print(AE_decoder_out.size())
            print(AE_decoder_out)
            tensor_to_ply(AE_decoder_out, threshold, save_ply_dir, scale_factor=None)
Example #5
0
def get_rnn_in_featur(model, device, rawPc_ply_dir_list):
    """Encode each raw point cloud and stack the encoder outputs row-wise.

    Returns a tensor of latent codes concatenated along dim 0; if the input
    list is empty, the (1, 4096) zeros placeholder is returned unchanged.
    """
    model.eval()
    with torch.no_grad():
        # Placeholder returned as-is when rawPc_ply_dir_list is empty.
        feature_vector = torch.zeros(1, 4096).to(device)
        for seq_idx, ply_path in enumerate(rawPc_ply_dir_list):
            pc_set = dataset.My_PLy_AE_Dataset(ply_dir=[ply_path],
                                               device=device,
                                               Normalize_=[])
            loader = torch.utils.data.DataLoader(pc_set,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=0)
            for sample_idx, voxels in loader:
                latent, _decoded = model(voxels)
                # First cloud replaces the placeholder; later ones are appended.
                feature_vector = latent if seq_idx == 0 else torch.cat(
                    (feature_vector, latent), 0)

    return feature_vector
Example #6
0
def get_rnn_out(model, rawPc_dir_seq):
    """Feed a point-cloud sequence through the RNN and return its squeezed output.

    Args:
        model: RNN exposing ``init_hidden()`` and ``model(x, hidden)``.
        rawPc_dir_seq: list of .ply paths forming the input sequence.

    Returns:
        The squeezed output of the last batch processed.

    NOTE(review): `device` is not a parameter here — it must exist as a
    module-level global; confirm it is defined before this is called.
    NOTE(review): if the loader yields no batches, `output` is unbound and
    the return raises UnboundLocalError.
    """
    model.eval()
    with torch.no_grad():
        testset = dataset.My_PLy_AE_Dataset(ply_dir=rawPc_dir_seq,
                                            device=device,
                                            Normalize_=[],
                                            is_full_size=False,
                                            scale=1.0)
        testloader = torch.utils.data.DataLoader(testset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=0)
        for batch_idx, (idex, x) in enumerate(testloader):
            # Fresh hidden state per batch.
            hidden = model.init_hidden()
            batch_x = Variable(x)
            # BUGFIX: removed the dead `output = batch_x` assignment that was
            # immediately overwritten by the model call below.
            output, hidden = model(batch_x, hidden)
            output = torch.squeeze(output)

    return output