Example no. 1
0
# NOTE(review): multi-GPU wrapping is disabled; kept commented out for reference.
# if torch.cuda.device_count() > 1:
#     print("Using "+ str(torch.cuda.device_count() )+" GPUS" )
#     model = nn.DataParallel(model, device_ids=[0])
print("------Successfully Built model-------")

# Earlier save/log directory setup, currently disabled.
# model_save_dir = os.path.join(BASE_DIR, "models", "classifier_"+str(NUM_POINT))
# log_save_dir = os.path.join(BASE_DIR, "logs")
# os.makedirs(model_save_dir, exist_ok = True)
# os.makedirs(log_save_dir, exist_ok = True)

# Directory where extracted intermediate features are written.
features_save_dir = os.path.join(BASE_DIR, "features")
os.makedirs(features_save_dir, exist_ok = True)

# TRAIN_FILES = provider.getDataFiles(os.path.join(BASE_DIR, 'data/threeclass/train_files.txt'))

# Training set iterated in a fixed order (shuffle=False) so extracted
# features stay aligned with the dataset ordering.
train_dataset= provider.PCDDataset(BASE_DIR, "train", None, None)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)

def getMidFeatures():
    """Run the trained model over the training set and accumulate the
    intermediate 192-dim feature vectors exposed by ``model.feature``.

    NOTE(review): this function appears truncated in this snippet —
    ``feature_file`` is opened but never written to or closed here, and
    ``eval_acc``, ``loss`` and ``pred`` are computed but unused in the
    visible code. Confirm the missing tail before relying on it.
    """
    # NOTE(review): opened without a context manager and never closed in
    # the visible code — presumably the cut-off tail writes `features`
    # to it; prefer `with open(...)` once confirmed.
    feature_file = open(os.path.join(features_save_dir, "features.txt"),'w')
    model.eval()  # inference mode: freeze dropout / batch-norm statistics
    # Empty GPU accumulator; assumes model.feature is (batch, 192) — TODO confirm.
    features = torch.cuda.FloatTensor(0, 192)
    eval_acc = 0
    with torch.no_grad():
        for data in train_loader:
            data.to(device)
            out = model(data.pos, data.batch)
            # model.feature holds the mid-network activations captured
            # by the forward pass above.
            features = torch.cat((features,model.feature),0)
            loss = F.nll_loss(out, data.y)
            pred = out.max(1)[1]
            # print(pred.eq(data.y).sum())
Example no. 2
0
    dtype = torch.FloatTensor

print("------Building model and loading params-------")

# Center-estimation network; weights restored from a fixed checkpoint.
# NOTE(review): the checkpoint filename is hard-coded — confirm it exists
# on disk before running.
cemodel = CenterEstimateNN(NUM_CLASS).to(device)
cemodel.load_state_dict(torch.load(os.path.join(BASE_DIR, 'models', 'center_pt1024_Huber', '500_0.0002767378449789248_1024')))

print("------Successfully Built model-------")


# Directory where extracted features are written.
features_save_dir = os.path.join(BASE_DIR, "features")
os.makedirs(features_save_dir, exist_ok=True)

# TRAIN_FILES = provider.getDataFiles(os.path.join(BASE_DIR, 'data/threeclass/train_files.txt'))

# Test split iterated in a fixed order for reproducible evaluation.
test_dataset = provider.PCDDataset(BASE_DIR, "test", None, None)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

# Running accumulator for the mean Euclidean distance metric.
# (fix: removed the stray trailing semicolon and normalized spacing)
average_euclidean_dist = 0

def getPredictCPts():
    """Print the ground-truth center alongside the network's predicted
    center point for every batch of the test set."""
    cemodel.eval()  # inference mode: freeze dropout / batch-norm statistics
    with torch.no_grad():
        for batch in test_loader:
            batch.to(device)
            predicted = cemodel(batch.pos, batch.batch)
            print("label:", batch.center)
            print("predict cpt:", predicted)


if __name__ == '__main__':
Example no. 3
0
    datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# Log directory keyed by a unique timestamp so repeated runs don't clash.
log_save_dir = os.path.join(
    BASE_DIR, "logs", "center_pt", "MSE",
    str(datetime.datetime.timestamp(datetime.datetime.now())))
# NOTE(review): model_save_dir is assigned above this snippet (the
# assignment is cut off) — confirm it is defined before this runs.
os.makedirs(model_save_dir, exist_ok=True)
os.makedirs(log_save_dir, exist_ok=True)

# File lists for the three-class train/test splits.
TRAIN_FILES = provider.getDataFiles(
    os.path.join(BASE_DIR, 'data/threeclass/train_files.txt'))
TEST_FILES = provider.getDataFiles(
    os.path.join(BASE_DIR, 'data/threeclass/test_files.txt'))

# pre_transform, transform = T.NormalizeScale(), T.SamplePoints(NUM_POINT)
# train_dataset= provider.PCDDataset(BASE_DIR, "train", None, pre_transform= True)
train_dataset = provider.PCDDataset(BASE_DIR,
                                    "train",
                                    None,
                                    pre_transform=False)
# print(train_dataset.ang_m, train_dataset.ang_range , train_dataset.ctr_m , train_dataset.ctr_range)
test_dataset = provider.PCDDataset(BASE_DIR, "test", None, pre_transform=False)
# test_dataset= provider.PCDDataset(BASE_DIR, "test", None, pre_transform= False,
#                                   data_params = {'ang_m': train_dataset.ang_m,
#                                                  'ang_range': train_dataset.ang_range,
#                                                  'ctr_m': train_dataset.ctr_m,
#                                                  'ctr_std': train_dataset.ctr_std})
# Shuffle training batches each epoch; evaluate in a fixed order.
# NOTE(review): test batch size is hard-coded to 32 rather than
# BATCH_SIZE — confirm this is intentional.
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# optimizer = torch.optim.SGD(model.parameters(), lr = 0.001, momentum = 0.9)
optimizer = torch.optim.Adam(cemodel.parameters(), lr=0.001)

print(train_dataset)