# --- ModelNet40 classification: data / model / optimizer setup (fragment) ---
# NOTE(review): the first line below is the TAIL of a DataLoader(...) call whose
# opening (presumably the test/eval loader) lies before this chunk — do not
# treat it as a standalone statement.
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True, )
# Training dataset: ModelNet40 point clouds with the composed train-time
# transforms; args.num_points controls points sampled per cloud.
train_set = ModelNet40Cls(args.num_points, transforms=train_transforms)
train_loader = DataLoader(
    train_set,
    batch_size=args.batch_size,
    shuffle=True,          # reshuffle each epoch
    num_workers=4,
    pin_memory=True,       # faster host->GPU transfer
)
# PointNet++ classifier: no extra per-point features (input_channels=0),
# 40 ModelNet classes, xyz coordinates fed to the set-abstraction layers.
model = Pointnet(input_channels=0, num_classes=40, use_xyz=True)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# LR schedule (for LambdaLR): step-decay by args.lr_decay every
# args.decay_step SAMPLES (iteration count is converted via batch_size),
# floored so the effective LR never drops below lr_clip.
lr_lbmd = lambda it: max(
    args.lr_decay**(int(it * args.batch_size / args.decay_step)),
    lr_clip / args.lr,
)
# Matching schedule for BatchNorm momentum, floored at bnm_clip.
bn_lbmd = lambda it: max(
    args.bn_momentum * args.bnm_decay** (int(it * args.batch_size / args.decay_step)),
    bnm_clip,
)
# --- ModelNet40 classification setup (variant of the block above; fragment) ---
# NOTE(review): the first line below is the TAIL of a DataLoader(...) call
# opened before this chunk — not a standalone statement.
batch_size=args.batch_size, shuffle=True, num_workers=2, pin_memory=True, )
# Training dataset; here the transform pipeline is named `transforms`
# (defined earlier, outside this view).
train_set = ModelNet40Cls(args.num_points, transforms=transforms)
train_loader = DataLoader(
    train_set,
    batch_size=args.batch_size,
    shuffle=True,          # reshuffle each epoch
    num_workers=2,
    pin_memory=True,       # faster host->GPU transfer
)
# PointNet++ classifier: xyz-only input, 40 ModelNet classes.
model = Pointnet(input_channels=0, num_classes=40, use_xyz=True)
model.cuda()
optimizer = optim.Adam(
    model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# Step-decay LR multiplier (for LambdaLR), decaying every args.decay_step
# samples and clipped below at lr_clip / args.lr.
lr_lbmd = lambda it: max(
    args.lr_decay ** (int(it * args.batch_size / args.decay_step)),
    lr_clip / args.lr,
)
# BatchNorm-momentum schedule with the same step cadence, floored at bnm_clip.
bn_lbmd = lambda it: max(
    args.bn_momentum
    * args.bnm_decay ** (int(it * args.batch_size / args.decay_step)),
    bnm_clip,
)  # default value
# --- Script header: __future__ tail, imports, and parameter count (fragment) ---
# NOTE(review): the first line below closes a `from __future__ import (`
# statement opened before this chunk.
division, absolute_import, with_statement, print_function, unicode_literals, )
import numpy as np
import time
import glob
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
import etw_pytorch_utils as pt_utils
import pprint
import os.path as osp
import os, sys
import argparse
# Make the repository root importable so the `pointnet2` package resolves
# when this script is run from its own directory.
sys.path.append('../..')
from pointnet2.models import Pointnet2ClsMSG as Pointnet
from pointnet2.models.pointnet2_msg_cls import model_fn_decorator
from pointnet2.data import ScanNet
import pointnet2.data.data_utils as d_utils
if __name__ == "__main__":
    # model
    # PointNet++ with 3 extra per-point channels (presumably RGB — TODO
    # confirm against the ScanNet loader) and 21 ScanNet classes; this
    # block only reports the parameter count.
    model = Pointnet(input_channels=3, num_classes=21, use_xyz=True)
    print('#parameters %d' % sum([x.nelement() for x in model.parameters()]))
# --- ScanNet training setup: loaders, multi-GPU model, optimizer (fragment) ---
# NOTE(review): the first line below is the TAIL of the train DataLoader(...)
# call opened before this chunk.
shuffle=True, num_workers=2, pin_memory=True, drop_last=True)
# Validation loader: deterministic order, keep the final partial batch.
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False,
                        num_workers=2, pin_memory=True, drop_last=False)
# model
device = torch.device("cuda" if args.cuda else "cpu")
# Channel counts come from CLI args (args.input_plane extra per-point
# features, args.output_plane classes).
model = Pointnet(input_channels=args.input_plane, num_classes=args.output_plane, use_xyz=True)
model.to(device)
# Wrap for multi-GPU data parallelism; note this prefixes parameter names
# with "module." in any saved state_dict.
model = nn.DataParallel(model)
print("Let's use", torch.cuda.device_count(), "GPUs!")
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# scheduler
# Step-decay LR multiplier (for LambdaLR): decay every args.decay_step
# samples, floored so the effective LR never drops below lr_clip.
lr_lbmd = lambda it: max(
    args.lr_decay**(int(it * args.batch_size / args.decay_step)),
    lr_clip / args.lr,
)
# --- ScanNet evaluation: dataset, model restore, eval prologue (fragment) ---
# NOTE(review): the first line below is the body of an `if` branch whose
# condition (presumably an args.color-style flag — confirm upstream) lies
# before this chunk.
val_set = ScanNet(val_files,args.num_points,color=True)
else:
    # Geometry-only variant: no per-point color features.
    val_set = ScanNet(val_files,args.num_points)
val_loader = DataLoader(
    val_set,
    batch_size=args.batch_size,
    shuffle=False,        # deterministic order for evaluation
    num_workers=2,
    pin_memory=True,
    drop_last=False,      # score every sample, including the last partial batch
)
# model
device = torch.device("cuda" if args.cuda else "cpu")
model = Pointnet(input_channels=args.input_plane, num_classes=args.output_plane, use_xyz=True)
model.to(device)
# Wrap in DataParallel BEFORE loading: the checkpoint below must therefore
# contain "module."-prefixed keys (i.e. saved from a DataParallel model) —
# TODO confirm, otherwise load_state_dict will raise on key mismatch.
model=nn.DataParallel(model)
print("Let's use", torch.cuda.device_count(), "GPUs!")
# restore model
path_checkpoint=os.path.join('logs',args.exp_name,'model.t7')
checkpoint = torch.load(path_checkpoint)  # checkpoint
model.load_state_dict(checkpoint)
####################
# Validate
####################
model.eval()
# Accumulators for predicted / ground-truth labels over the loop that follows.
test_pred = []
test_true = []