Example #1
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
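
A minimal driver for the test() function above (a sketch; the IOStream shim and the argument values are assumptions for illustration, not the project's actual CLI):

import argparse
import torch

class IOStream:
    # tiny stand-in for the io object: echo to stdout and append to a log file
    def __init__(self, path):
        self.f = open(path, 'a')
    def cprint(self, text):
        print(text)
        self.f.write(text + '\n')
        self.f.flush()

args = argparse.Namespace(num_points=1024,
                          test_batch_size=16,
                          cuda=torch.cuda.is_available(),
                          model_path='pretrained/model.1024.t7')
test(args, IOStream('run.log'))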
Example #2
def loadcustomtoclassify(args, io):
    device = torch.device("cuda" if args.cuda else "cpu")
    model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load("pretrained/custommodel.t7"))
    model = model.eval()
    classify_folder(model)
Example #3
def test(args, io):
    if args.dataset == 'modelnet40':
        test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                                 batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    elif args.dataset == 'ScanObjectNN':
        test_loader = DataLoader(ScanObjectNN(partition='test', num_points=args.num_points), num_workers=8,
                                 batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    else:
        raise Exception("Dataset Not supported")

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        if args.dataset == 'modelnet40':
            model = PointNet(args, output_channels=40).to(device)
        elif args.dataset == 'ScanObjectNN':
            model = PointNet(args, output_channels=15).to(device)
        else:
            raise Exception("Dataset Not supported")
    elif args.model == 'dgcnn':
        if args.dataset == 'modelnet40':
            model = DGCNN(args, output_channels=40).to(device)
        elif args.dataset == 'ScanObjectNN':
            model = DGCNN(args, output_channels=15).to(device)
        else:
            raise Exception("Dataset Not supported")
    elif args.model == 'gbnet':
        if args.dataset == 'modelnet40':
            model = GBNet(args, output_channels=40).to(device)
        elif args.dataset == 'ScanObjectNN':
            model = GBNet(args, output_channels=15).to(device)
        else:
            raise Exception("Dataset Not supported")
    else:
        raise Exception("Not implemented")
    print(str(model))
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f'%(test_acc, avg_per_class_acc)
    io.cprint(outstr)
Example #4
    def __init__(self):
        self.device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')

        self.model = DGCNN(256, char_file=config.char_embedding_path,
                           word_file=config.word_embedding_path).to(self.device)
        self.epoches = 150
        self.lr = 1e-4

        self.print_step = 15
        self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
                                    lr=self.lr)

        self.best_model = DGCNN(256, char_file=config.char_embedding_path,
                                word_file=config.word_embedding_path).to(self.device)
        self._val_loss = 1e12
Example #5
def main(args, io):
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    DGCNNModel = DGCNN(args).cuda()
    DGCNNModel = nn.DataParallel(DGCNNModel)
    DGCNNModel.load_state_dict(torch.load(args.path))

    SampleNetModel = SampleNet(num_out_points=args.sample_points,
                               bottleneck_size=args.bottleneck_size,
                               group_size=args.group_size,
                               initial_temperature=1.0,
                               input_shape="bnc",
                               output_shape="bnc",
                               complete_fps=False)
    SampleNetModel.cuda()
    SampleNetModel = nn.DataParallel(SampleNetModel)

    train_loader = DataLoader(S3DIS_cls(partition='train',
                                        num_points=args.num_points,
                                        test_area=args.test_area),
                              num_workers=0,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(S3DIS_cls(partition='test',
                                       num_points=args.num_points,
                                       test_area=args.test_area),
                             num_workers=0,
                             batch_size=args.batch_size,
                             shuffle=False,
                             drop_last=False)

    opt = optim.SGD(SampleNetModel.parameters(),
                    lr=args.lr,
                    momentum=0.9,
                    weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, args.epochs)

    best_test_loss = 1e10
    for i in range(args.epochs):
        io.cprint('Epoch [%d]' % (i + 1))
        train(SampleNetModel, DGCNNModel, train_loader, opt, io)
        scheduler.step()
        test_loss = test(SampleNetModel, DGCNNModel, test_loader, io)

        torch.save(SampleNetModel, 'SampleNetCheckPoint/checkpoint.t7')
        if test_loss < best_test_loss:
            best_test_loss = test_loss
            torch.save(SampleNetModel, 'SampleNetCheckPoint/bestsamplenet.t7')
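
Note that torch.save(SampleNetModel, ...) above pickles the entire module, including the nn.DataParallel wrapper, so loading later requires the exact same class definitions. A sketch of the state_dict alternative, reusing the hypothetical paths from the example:

# save weights only, unwrapping the DataParallel module first
torch.save(SampleNetModel.module.state_dict(),
           'SampleNetCheckPoint/bestsamplenet.t7')

# reload into a freshly built SampleNet (constructor args as in main above)
model = SampleNet(num_out_points=args.sample_points,
                  bottleneck_size=args.bottleneck_size,
                  group_size=args.group_size,
                  initial_temperature=1.0,
                  input_shape="bnc",
                  output_shape="bnc",
                  complete_fps=False)
model.load_state_dict(torch.load('SampleNetCheckPoint/bestsamplenet.t7'))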
Example #6
File: main.py Project: sngver/dgcnn
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []

    # dump predicted and true labels to a text file
    DUMP_DIR = 'checkpoints/' + args.exp_name + '/' + 'dump'
    os.makedirs(DUMP_DIR, exist_ok=True)  # ensure the dump directory exists
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')

    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)

    for i in range(test_true.shape[0]):
        fout.write('%d, %d\n' % (test_pred[i], test_true[i]))
    fout.close()

    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
Example #7
    def __init__(self, args, output_channels=40):
        super(DGCNN_exit3, self).__init__()
        self.args = args
        self.DGCNN = DGCNN(args)
        dict_tmp = torch.load('./pretrained/model.1024.t7')
        new_state_dict = OrderedDict()
        for name, tensor in dict_tmp.items():
            name = name[7:]  # strip the 'module.' prefix added by nn.DataParallel
            new_state_dict[name] = tensor
        self.DGCNN.load_state_dict(new_state_dict)
        self.k = 20

        for para in self.DGCNN.parameters():
            para.requires_grad = False

        self.exit3_conv = nn.Sequential(
            nn.Conv1d(256, 256, kernel_size=1, bias=False),
            nn.BatchNorm1d(256),
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.exit3_fc2 = nn.Sequential(
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.exit3_predict = nn.Sequential(
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Dropout(0.5),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Dropout(0.5),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Dropout(0.5),
            nn.Linear(128, 40),
            nn.BatchNorm1d(40),
            nn.LeakyReLU(negative_slope=0.2),
        )
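
The remapping loop in this constructor drops the first seven characters of each key, i.e. the 'module.' prefix that nn.DataParallel prepends when a wrapped model is saved. The same idea as a standalone helper (a sketch, not part of the original code):

from collections import OrderedDict

def strip_dataparallel_prefix(state_dict):
    # remove the 'module.' prefix nn.DataParallel adds to parameter names
    return OrderedDict((k[len('module.'):] if k.startswith('module.') else k, v)
                       for k, v in state_dict.items())

# usage: self.DGCNN.load_state_dict(strip_dataparallel_prefix(dict_tmp))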
Example #8
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)
    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    model = nn.DataParallel(model)
    checkpoint = torch.load(args.resume)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.eval()
    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    SHAPE_NAMES = [line.rstrip() for line in \
                   open('data/modelnet40_ply_hdf5_2048/shape_names.txt')]
    NUM_CLASSES = 40
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    for data, label in test_loader:
        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
    for i in range(test_true.shape[0]):
        l = test_true[i]
        total_seen_class[l] += 1
        total_correct_class[l] += (test_pred[i] == l)
    class_accuracies = np.array(total_correct_class) / np.array(
        total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        io.cprint('%10s:\t%0.3f' % (name, class_accuracies[i]))
Example #9
def startcustomtesting(args, io):
    ft_loader = DataLoader(FT11(num_points=args.num_points),
                           num_workers=8,
                           batch_size=args.test_batch_size,
                           shuffle=True,
                           drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")
    model = DGCNN(args).to(device)
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load("pretrained/custommodel.t7"))
    model = model.eval()

    criterion = cal_loss
    epoch = 1

    ft_loss = 0.0
    count = 0
    model.eval()
    ft_pred = []
    ft_true = []
    for data, label in ft_loader:
        data, label = data.to(device), label.to(device).squeeze()
        data = data.permute(0, 2, 1)
        batch_size = data.size()[0]
        logits = model(data)
        loss = criterion(logits, label)
        preds = logits.max(dim=1)[1]
        count += batch_size
        ft_loss += loss.item() * batch_size
        ft_true.append(label.cpu().numpy())
        ft_pred.append(preds.detach().cpu().numpy())
        #print(data.shape, label.shape, logits.shape, preds.shape)
        print('LABELS:', label)
        print('PREDS:', preds)
        #print('LOGITS:', logits)
    ft_true = np.concatenate(ft_true)
    ft_pred = np.concatenate(ft_pred)
    ft_acc = metrics.accuracy_score(ft_true, ft_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(ft_true, ft_pred)
    outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
        epoch, ft_loss * 1.0 / count, ft_acc, avg_per_class_acc)
    io.cprint(outstr)
Example #10
def main(args, io):
    train_loader = DataLoader(S3DIS_cls(partition='train',
                                        num_points=args.num_points,
                                        test_area=args.test_area),
                              num_workers=8,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(S3DIS_cls(partition='test',
                                       num_points=args.num_points,
                                       test_area=args.test_area),
                             num_workers=8,
                             batch_size=args.batch_size,
                             shuffle=False,
                             drop_last=False)

    model = DGCNN(args)
    model.cuda()
    model = nn.DataParallel(model)

    print("Let's use", torch.cuda.device_count(), "GPUs!")

    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=0.9,
                    weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, args.epochs)

    best_test_loss = 1e10
    for epoch in range(args.epochs):
        io.cprint('Epoch [%d]' % (epoch + 1))
        train(model, train_loader, opt, io)
        scheduler.step()
        test_loss = test(model, test_loader, io)

        if test_loss < best_test_loss:
            best_test_loss = test_loss
            # save in a torch==1.4-readable format
            torch.save(model.state_dict(),
                       'checkpoints/%s/model_%s.t7' %
                       (args.exp_name, args.test_area),
                       _use_new_zipfile_serialization=False)
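
The _use_new_zipfile_serialization=False flag writes the legacy pickle-based format so the checkpoint stays readable under torch==1.4, as the comment says. Reloading is then a plain load_state_dict; a sketch using the same path scheme (the DataParallel wrap matches how the weights were saved):

model = nn.DataParallel(DGCNN(args).cuda())
model.load_state_dict(torch.load('checkpoints/%s/model_%s.t7'
                                 % (args.exp_name, args.test_area)))
model.eval()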
Example #11
def startcustomtraining(args, io):
    ft_loader = DataLoader(FT10(num_points=args.num_points),
                           num_workers=8,
                           batch_size=args.test_batch_size,
                           shuffle=True,
                           drop_last=True)
    ft_test_loader = DataLoader(FT11(num_points=args.num_points),
                                num_workers=8,
                                batch_size=args.test_batch_size,
                                shuffle=True,
                                drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum,
                        weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)

    criterion = cal_loss
    best_ft_test_acc = 0.0

    i = 0
    train_accs = []
    test_accs = []
    epochs = []

    for epoch in range(args.epochs):
        i += 1
        scheduler.step()
        ft_loss = 0.0
        count = 0
        model.train()
        ft_pred = []
        ft_true = []
        for data, label in ft_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            ft_loss += loss.item() * batch_size
            ft_true.append(label.cpu().numpy())
            ft_pred.append(preds.detach().cpu().numpy())
            #print(data.shape, label.shape, logits.shape, preds.shape)
            #print('LABELS:', label)
            #print('PREDS:', preds)
            #print('LOGITS:', logits)
        ft_true = np.concatenate(ft_true)
        ft_pred = np.concatenate(ft_pred)
        ft_acc = metrics.accuracy_score(ft_true, ft_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(ft_true, ft_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
            epoch, ft_loss * 1.0 / count, ft_acc, avg_per_class_acc)
        io.cprint(outstr)
        train_accs.append(ft_acc)

        ft_test_loss = 0.0
        count = 0
        model.eval()
        ft_test_pred = []
        ft_test_true = []
        for data, label in ft_test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            ft_test_loss += loss.item() * batch_size
            ft_test_true.append(label.cpu().numpy())
            ft_test_pred.append(preds.detach().cpu().numpy())
            #print(data.shape, label.shape, logits.shape, preds.shape)
            #print('LABELS:', label)
            #print('PREDS:', preds)
            #print('LOGITS:', logits)
        ft_test_true = np.concatenate(ft_test_true)
        ft_test_pred = np.concatenate(ft_test_pred)
        ft_test_acc = metrics.accuracy_score(ft_test_true, ft_test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(
            ft_test_true, ft_test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
            epoch, ft_test_loss * 1.0 / count, ft_test_acc, avg_per_class_acc)
        io.cprint(outstr)
        if ft_test_acc > best_ft_test_acc:
            print('save now')
            best_ft_test_acc = ft_test_acc
            torch.save(model.state_dict(), 'pretrained/custommodel.t7')
        #torch.save(model.state_dict(), 'pretrained/custommodel.t7')

        epochs.append(i)
        test_accs.append(ft_test_acc)

        fig, ax = plt.subplots()
        ax.plot(epochs, train_accs, color='blue', label='train acc')
        ax.plot(epochs, test_accs, color='red', label='test acc')
        ax.set(xlabel='epoch',
               ylabel='accuracy',
               title='accuracy values per epoch')
        ax.grid()
        ax.legend()
        fig.savefig("accuracy.png")
        plt.show()
Example #12
    hp.gpu_nums,
    hp.bert_pre,
    shuffle=False)

handle = tf.placeholder(tf.string, shape=[])
iter = tf.data.Iterator.from_string_handle(handle, train_batches.output_types,
                                           train_batches.output_shapes)
# create an iterator of the correct shape and type
xs, ys, labels = iter.get_next()

logging.info('# init data')
training_iter = train_batches.make_one_shot_iterator()
val_iter = eval_batches.make_initializable_iterator()

logging.info("# Load model")
m = DGCNN(hp)
# load Bert
input_ids, input_masks, segment_ids = concat_inputs(xs, ys)
vec = BertVec(hp.bert_pre, input_ids, input_masks, segment_ids)

logging.info('# Get train and eval op')
total_steps = hp.num_epochs * num_train_batches
train_op, train_loss, train_summaries, global_step = m.train_multi(
    vec, xs[1], ys[1], labels, total_steps)
indexs, eval_loss, eval_summaries = m.eval(vec, xs[1], ys[1], labels)

logging.info("# Session")
saver = tf.train.Saver(max_to_keep=hp.num_epochs)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    ckpt = tf.train.latest_checkpoint(hp.logdir)
    if ckpt is None:
Example #13
class DGCNN_exit1(nn.Module):
    def __init__(self, args, output_channels=40):
        super(DGCNN_exit1, self).__init__()
        self.args = args
        self.DGCNN = DGCNN(args)
        dict_tmp = torch.load('./pretrained/model.1024.t7')
        new_state_dict = OrderedDict()
        for name, tensor in dict_tmp.items():
            name = name[7:]  # strip the 'module.' prefix added by nn.DataParallel
            new_state_dict[name] = tensor

        self.DGCNN.load_state_dict(new_state_dict)
        self.k = 20

        for para in self.DGCNN.parameters():
            para.requires_grad = False

        self.exit1_conv = nn.Sequential(
            nn.Conv1d(64, 256, kernel_size=1, bias=False),
            nn.BatchNorm1d(256),
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.exit1_fc2 = nn.Sequential(
            nn.Linear(512, 1536),
            nn.BatchNorm1d(1536),
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.exit1_predict = nn.Sequential(
            nn.Linear(1536, 512),
            nn.BatchNorm1d(512),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Dropout(0.5),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Dropout(0.5),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.LeakyReLU(negative_slope=0.2),
            nn.Dropout(0.5),
            nn.Linear(128, 40),
            nn.BatchNorm1d(40),
            nn.LeakyReLU(negative_slope=0.2),
        )

    def forward(self, x, noise_factor=0.1):

        batch_size = x.size(0)
        x = get_graph_feature(
            x, k=self.k)  # [batch_size, dim=3 * 2, point_num, k]
        x = self.DGCNN.conv1(x)
        x1 = x.max(dim=-1,
                   keepdim=False)[0]  # [batch_size, dim = 64, point_num]
        x = x1  # do not need to concate

        #exit 1
        x = self.exit1_conv(x)

        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size,
                                              -1)  # (batch_size, dimension)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size,
                                              -1)  # (batch_size, dimension)
        x = torch.cat((x1, x2), 1)
        x = self.exit1_fc2(x)

        #awgn channel model
        #x = awgn_channel(x,0.1) # 20dB
        x = awgn_channel(x, self.args.channel_noise)

        x = self.exit1_predict(x)
        return x
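
awgn_channel is defined elsewhere in this project; one plausible definition, treating the second argument as a noise-to-signal power ratio (an assumption, the repo's actual scaling may differ), is:

def awgn_channel(x, noise_factor):
    # add white Gaussian noise with power equal to noise_factor times the signal power
    signal_power = x.pow(2).mean()
    noise = torch.randn_like(x) * (noise_factor * signal_power).sqrt()
    return x + noise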
Example #14
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points), num_workers=8,
                              batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8,
                             batch_size=args.test_batch_size, shuffle=True, drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'semigcn':
        model = SemiGCN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
        
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            opt.load_state_dict(checkpoint['opt'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            
    #scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr, last_epoch=args.start_epoch-1)
    scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=20, gamma=0.8)
    #scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9825, last_epoch=args.start_epoch-1)
    
    criterion = cal_loss

    best_test_acc = 0
    for epoch in range(args.start_epoch, args.epochs):
        #scheduler.step()
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        scheduler.step()
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch,
                                                                                 train_loss*1.0/count,
                                                                                 metrics.accuracy_score(
                                                                                     train_true, train_pred),
                                                                                 metrics.balanced_accuracy_score(
                                                                                     train_true, train_pred))
        io.cprint(outstr)
        if epoch%10 == 0:
            # save a running checkpoint every 10 epochs
            torch.save({'epoch': epoch + 1,
                        'arch': args.model,
                        'state_dict': model.state_dict(),
                        'opt' : opt.state_dict()},
                        'checkpoints/%s/models/checkpoint_latest.pth.tar' % args.exp_name)
        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch,
                                                                              test_loss*1.0/count,
                                                                              test_acc,
                                                                              avg_per_class_acc)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save({'epoch': epoch + 1,
                        'arch': args.model,
                        'state_dict': model.state_dict(),
                        'opt' : opt.state_dict()},
                        'checkpoints/%s/models/checkpoint_best.pth.tar' % args.exp_name)
Example #15
    seed = 1234
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

    num_points = 1024
    # Load datasets.
    train_dataset, train_dataloader, test_dataset, test_dataloader = \
        create_datasets_and_dataloaders(num_points)

    # Create the network.
    n_dims = 3
    net = {}
    if args.frontend == 'DGCNN':
        net['frontend'] = DGCNN(n_dims, args.embed_dims)
    else:
        net['frontend'] = PointNet(n_dims, args.embed_dims)

    if args.attention:
        net['middle'] = Transformer(args.embed_dims)

    if args.backend == 'SVD':
        net['backend'] = SVD(args.embed_dims, device)
    else:
        net['backend'] = MLP(args.embed_dims, device)

    if torch.cuda.is_available():
        for key in net.keys():
            net[key].cuda()
Example #16
                        metavar='N',
                        help='Num of nearest neighbors to use in DGCNN')
    parser.add_argument('--print_iter',
                        type=int,
                        default=100,
                        help='Print interval')
    args = parser.parse_args()
    set_seed(1)
    print(args)

    # enable cudnn benchmark
    cudnn.benchmark = True

    # build model
    if args.model.lower() == 'dgcnn':
        model = DGCNN(args.emb_dims, args.k, output_channels=40)
    elif args.model.lower() == 'pointnet':
        model = PointNetCls(k=40, feature_transform=args.feature_transform)
    elif args.model.lower() == 'pointnet2':
        model = PointNet2ClsSsg(num_classes=40)
    elif args.model.lower() == 'pointconv':
        model = PointConvDensityClsSsg(num_classes=40)
    else:
        print('Model not recognized')
        exit(-1)

    model = nn.DataParallel(model).cuda()

    # use Adam optimizer, cosine lr decay
    opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
    scheduler = CosineAnnealingLR(opt, T_max=args.epochs, eta_min=1e-5)
Example #17
                        default=200,
                        metavar='N',
                        help='Number of dropping points')
    args = parser.parse_args()
    BATCH_SIZE = BATCH_SIZE[args.num_points]
    BEST_WEIGHTS = BEST_WEIGHTS[args.dataset][args.num_points]
    if args.batch_size == -1:
        args.batch_size = BATCH_SIZE[args.model]
    set_seed(1)
    print(args)

    cudnn.benchmark = True

    # build model
    if args.model.lower() == 'dgcnn':
        model = DGCNN(args.emb_dims, args.k, output_channels=40)
    elif args.model.lower() == 'pointnet':
        model = PointNetCls(k=40, feature_transform=args.feature_transform)
    elif args.model.lower() == 'pointnet2':
        model = PointNet2ClsSsg(num_classes=40)
    elif args.model.lower() == 'pointconv':
        model = PointConvDensityClsSsg(num_classes=40)
    else:
        print('Model not recognized')
        exit(-1)

    model = nn.DataParallel(model).cuda()

    # load model weight
    print('Loading weight {}'.format(BEST_WEIGHTS[args.model]))
    state_dict = torch.load(BEST_WEIGHTS[args.model])
Example #18
    if args.batch_size == -1:  # automatic assign
        args.batch_size = BATCH_SIZE[args.model]
    # add point attack has more points in each point cloud
    if 'ADD' in args.data_root:
        args.batch_size = int(args.batch_size / 1.5)
    # sor processed point cloud has different points in each
    # so batch size only can be 1
    if 'sor' in args.data_root:
        args.batch_size = 1

    # enable cudnn benchmark
    cudnn.benchmark = True

    # build model
    if args.model.lower() == 'dgcnn':
        model = DGCNN(args.emb_dims, args.k, output_channels=40)
    elif args.model.lower() == 'pointnet':
        model = PointNetCls(k=40, feature_transform=args.feature_transform)
    elif args.model.lower() == 'pointnet2':
        model = PointNet2ClsSsg(num_classes=40)
    elif args.model.lower() == 'pointconv':
        model = PointConvDensityClsSsg(num_classes=40)
    else:
        print('Model not recognized')
        exit(-1)

    model = nn.DataParallel(model).cuda()

    # load model weight
    if args.model_path:
        model.load_state_dict(torch.load(args.model_path))
Example #19
class Model(object):
    def __init__(self):
        self.device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')

        self.model = DGCNN(256, char_file=config.char_embedding_path,
                           word_file=config.word_embedding_path).to(self.device)
        self.epoches = 150
        self.lr = 1e-4

        self.print_step = 15
        self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
                                    lr=self.lr)

        self.best_model = DGCNN(256, char_file=config.char_embedding_path,
                                word_file=config.word_embedding_path).to(self.device)
        self._val_loss = 1e12

    def train(self,train_data,dev_data,threshold=0.1):
        for epoch in range(self.epoches):
            self.model.train()
            for i,item in enumerate(train_data):
                self.optimizer.zero_grad()
                Qc,Qw,q_mask,Ec,Ew,e_mask,As,Ae = [i.to(self.device) for i in item]
                As_, Ae_ = self.model([Qc,Qw,q_mask,Ec,Ew,e_mask])
                As_loss=focal_loss(As,As_,self.device)
                Ae_loss=focal_loss(Ae,Ae_,self.device)
                # batch_size, max_seq_len_e 
                
                mask=e_mask==1
                loss=(As_loss.masked_select(mask).sum()+Ae_loss.masked_select(mask).sum()) / e_mask.sum()
                loss.backward()
                self.optimizer.step()

                if (i+1)%self.print_step==0 or i==len(train_data)-1:
                    logger.info("In Training : Epoch : {} \t Step / All Step : {} / {} \t Loss of every char : {}"\
                        .format(epoch+1, i+1,len(train_data),loss.item()*100))

                #debug
                # if i==2000:
                #     break
            
            self.model.eval()
            with torch.no_grad():
                self.validate(dev_data)
            
    def test(self,test_data,threshold=0.1):
        self.best_model.eval()
        self.best_model.to(self.device)
        with torch.no_grad():
            sl,el,sl_,el_=[],[],[],[]
            for i, item in enumerate(test_data):
                Qc,Qw,q_mask,Ec,Ew,e_mask,As,Ae = [i.to(self.device) for i in item]
                mask=e_mask==1
                As_,Ae_ = self.best_model([Qc,Qw,q_mask,Ec,Ew,e_mask])
                As_,Ae_,As,Ae = [ i.masked_select(mask).cpu().numpy() for i in [As_,Ae_,As,Ae]]
                As_,Ae_ = np.where(As_>threshold,1,0), np.where(Ae_>threshold,1,0)
                As,Ae = As.astype(int),Ae.astype(int)
                sl.append(As)
                el.append(Ae)
                sl_.append(As_)
                el_.append(Ae_)
            a=binary_confusion_matrix_evaluate(np.concatenate(sl),np.concatenate(sl_))
            b=binary_confusion_matrix_evaluate(np.concatenate(el),np.concatenate(el_))
            logger.info('In Test DataSet: START EVALUATION:\t Acc : {}\t Prec : {}\t Recall : {}\t F1-score : {}'\
                .format(a[0],a[1],a[2],a[3]))
            logger.info('In Test DataSet: START EVALUATION:\t Acc : {}\t Prec : {}\t Recall : {}\t F1-score : {}'\
                .format(b[0],b[1],b[2],b[3]))
                
    def validate(self,dev_data,threshold=0.1):
        val_loss=[]
        # import pdb; pdb.set_trace()
        for i, item in enumerate(dev_data):
            Qc,Qw,q_mask,Ec,Ew,e_mask,As,Ae = [i.to(self.device) for i in item]
            As_, Ae_ =  self.model([Qc,Qw,q_mask,Ec,Ew,e_mask])

            #cal loss
            As_loss,Ae_loss=focal_loss(As,As_,self.device) ,focal_loss(Ae,Ae_,self.device)
            mask=e_mask==1
            loss=(As_loss.masked_select(mask).sum() + Ae_loss.masked_select(mask).sum()) /  e_mask.sum()
            if (i+1)%self.print_step==0 or i==len(dev_data)-1:
                logger.info("In Validation: Step / All Step : {} / {} \t Loss of every char : {}"\
                    .format(i+1,len(dev_data),loss.item()*100))
            val_loss.append(loss.item())
            
            
            As_,Ae_,As,Ae = [ i.masked_select(mask).cpu().numpy() for i in [As_,Ae_,As,Ae]]
            As_,Ae_ = np.where(As_>threshold,1,0), np.where(Ae_>threshold,1,0)
            As,Ae = As.astype(int),Ae.astype(int)
            
            acc,prec,recall,f1=binary_confusion_matrix_evaluate(As,As_)
            
            logger.info('START EVALUATION :\t Acc : {}\t Prec : {}\t Recall : {}\t F1-score : {}'\
                .format(acc,prec,recall,f1))
            acc,prec,recall,f1=binary_confusion_matrix_evaluate(Ae,Ae_)
            logger.info('END EVALUATION :\t Acc : {}\t Prec : {}\t Recall : {}\t F1-score : {}'\
                .format(acc,prec,recall,f1))
            # [ , seq_len]
        l=sum(val_loss)/len(val_loss)
        logger.info('In Validation, Average Loss : {}'.format(l*100))
        if l<self._val_loss:
            logger.info('Update best Model in Valiation Dataset')
            self._val_loss=l
            self.best_model=deepcopy(self.model)


    def load_model(self,PATH):
        self.best_model.load_state_dict(torch.load(PATH))
        self.best_model.eval()

    def save_model(self,PATH):
        torch.save(self.best_model.state_dict(),PATH)
        logger.info('save best model successfully')

    '''
    Here the data is a List[dict] holding the raw text:
    - test_data
    | - { 'question', 'evidences', 'answer'}
    '''
    def get_test_answer(self,test_data,word2id,char2id):
        all_item =  len(test_data)
        t1=0.
        t3=0.
        t5=0.
        self.best_model.eval()
        with torch.no_grad():
            for item in test_data:
                q_text = item['question']
                e_texts = item['evidences']
                a = item['answer']
                a_ = self.extract_answer(q_text,e_texts,word2id,char2id)
                # a_  list of [ answer , possibility]
                n=len(a_)

                a_1 = {i[0] for i in a_[:1]}
                a_3 = {i[0] for i in a_[:3]}
                a_5 = {i[0] for i in a_[:5]}

                if a[0] == 'no_answer' and n==0:
                    t1+=1
                    t3+=1
                    t5+=1
                
                if [i for i in a if i in a_1]:
                    t1+=1
                if [i for i in a if i in a_3]:
                    t3+=1
                if [i for i in a if i in a_5]:
                    t5+=1
        
        logger.info('In Test Raw File')
        logger.info('Top One Answer : Acc : {}'.format(t1/all_item))
        logger.info('Top Three Answer : Acc : {}'.format(t3/all_item))
        logger.info('Top Five Answer : Acc : {}'.format(t5/all_item))
        
    def extract_answer(self,q_text,e_texts,word2id,char2id,maxlen=10,threshold=0.1):
        Qc,Qw,Ec,Ew= [],[],[],[]
        qc = list(q_text)
        Qc,q_mask=sent2id([qc],char2id)

        qw = alignWord2Char(tokenize(q_text))
        Qw,q_mask_=sent2id([qw],word2id)

        assert torch.all(q_mask == q_mask_)

        tmp = [(list(e),alignWord2Char(tokenize(e))) for e in e_texts]
        ec,ew = zip(*tmp)

        Ec,e_mask=sent2id(list(ec),char2id)
        Ew,e_mask_=sent2id(list(ew),word2id)
        assert torch.all(e_mask == e_mask_)

        totensor=lambda x: torch.from_numpy(np.array(x)).long().to(self.device)

        L=[Qc,Qw,q_mask,Ec,Ew,e_mask]
        L=[totensor(x) for x in L]

        with torch.no_grad():
            As_ , Ae_ = self.best_model(L)

        R={}
        for as_ ,ae_ , e in zip(As_,Ae_,e_texts):
            as_ ,ae_ = as_[:len(e)].cpu().numpy() , ae_[:len(e)].cpu().numpy()
            sidx = np.where(as_>threshold)[0]
            eidx = np.where(ae_>threshold)[0]
            result = { }
            for i in sidx:
                cond = (eidx >= i) & (eidx < i+maxlen)
                for j in eidx[cond]:
                    key=e[i:j+1]
                    result[key]=max(result.get(key,0),as_[i] * ae_[j])
            if result:
                for k,v in result.items():
                    if k not in R:
                        R[k]=[]
                    R[k].append(v)        
        # sort all answer
        R= [
            [k,((np.array(v)**2).sum()/(sum(v)+1))]
            for k , v in R.items()
        ]

        R.sort(key=lambda x: x[1], reverse=True)
        # R: (answer, possibility) pairs in descending order of score
        return R
Example #20
    hp.eval_batch_size,
    hp.gpu_nums,
    shuffle=False)

handle = tf.placeholder(tf.string, shape=[])
iter = tf.data.Iterator.from_string_handle(handle, train_batches.output_types,
                                           train_batches.output_shapes)

# create an iterator of the correct shape and type
xs, ys, labels = iter.get_next()
logging.info('# init data')
training_iter = train_batches.make_one_shot_iterator()
val_iter = eval_batches.make_initializable_iterator()

logging.info("# Load model")
m = DGCNN(hp)

# get op
train_op, train_loss, train_summaries, global_step = m.train1(xs, ys, labels)
indexs, eval_loss, eval_summaries = m.eval(xs, ys, labels)

token2idx, idx2token = load_vocab(hp.vocab)

logging.info("# Session")
saver = tf.train.Saver(max_to_keep=hp.num_epochs)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    ckpt = tf.train.latest_checkpoint(hp.logdir)
    if ckpt is None:
        logging.info("Initializing from scratch")
        sess.run(tf.global_variables_initializer())
Example #21
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--num_points',
                        type=int,
                        default=4096,
                        help='num of points to use')
    parser.add_argument('--dropout',
                        type=float,
                        default=0.5,
                        help='dropout rate')
    parser.add_argument('--emb_dims',
                        type=int,
                        default=1024,
                        metavar='N',
                        help='Dimension of embeddings')
    parser.add_argument('--k',
                        type=int,
                        default=40,
                        metavar='N',
                        help='Num of nearest neighbors to use')
    args = parser.parse_args()
    # load models
    if args.model == 'pointnet':
        model = PointNet(args)
    elif args.model == 'dgcnn':
        model = DGCNN(args)

    print('#parameters %d' % sum([x.nelement() for x in model.parameters()]))
Example #22
File: main.py Project: sngver/dgcnn
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train',
                                         num_points=args.num_points),
                              num_workers=8,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             num_workers=8,
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum,
                        weight_decay=1e-4)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)

    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)

    criterion = cal_loss

    best_test_acc = 0
    for epoch in range(args.epochs):
        scheduler.step()
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
            epoch, train_loss * 1.0 / count,
            metrics.accuracy_score(train_true, train_pred),
            metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(
            test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
            epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(),
                       'checkpoints/%s/models/model.t7' % args.exp_name)
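
One caveat: scheduler.step() here runs at the top of each epoch, before any opt.step(). Since PyTorch 1.1 the documented order is optimizer updates first, then one scheduler step per epoch, as in this skeleton:

for epoch in range(args.epochs):
    model.train()
    for data, label in train_loader:
        data, label = data.to(device), label.to(device).squeeze()
        opt.zero_grad()
        loss = criterion(model(data.permute(0, 2, 1)), label)
        loss.backward()
        opt.step()
    scheduler.step()  # advance the LR schedule after the epoch's updates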
Example #23
def test(args, io):
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'ssg':
        model = PointNet2SSG(output_classes=40, dropout_prob=0)
        model.to(device)
    elif args.model == 'msg':
        model = PointNet2MSG(output_classes=40, dropout_prob=0)
        model.to(device)
    elif args.model == 'ognet':
        # [64,128,256,512]
        model = Model_dense(20,
                            args.feature_dims, [512],
                            output_classes=40,
                            init_points=768,
                            input_dims=3,
                            dropout_prob=args.dropout,
                            id_skip=args.id_skip,
                            drop_connect_rate=args.drop_connect_rate,
                            cluster='xyzrgb',
                            pre_act=args.pre_act,
                            norm=args.norm_layer)
        if args.efficient:
            model = ModelE_dense(20,
                                 args.feature_dims, [512],
                                 output_classes=40,
                                 init_points=768,
                                 input_dims=3,
                                 dropout_prob=args.dropout,
                                 id_skip=args.id_skip,
                                 drop_connect_rate=args.drop_connect_rate,
                                 cluster='xyzrgb',
                                 pre_act=args.pre_act,
                                 norm=args.norm_layer,
                                 gem=args.gem,
                                 ASPP=args.ASPP)
        model.to(device)
    elif args.model == 'ognet-small':
        # [48,96,192,384]
        model = Model_dense(20,
                            args.feature_dims, [512],
                            output_classes=40,
                            init_points=768,
                            input_dims=3,
                            dropout_prob=args.dropout,
                            id_skip=args.id_skip,
                            drop_connect_rate=args.drop_connect_rate,
                            cluster='xyzrgb',
                            pre_act=args.pre_act,
                            norm=args.norm_layer)
        model.to(device)
    else:
        raise Exception("Not implemented")

    try:
        model.load_state_dict(torch.load(args.model_path))
    except RuntimeError:
        # checkpoint was saved from a DataParallel model; wrap, load, then unwrap
        model = nn.DataParallel(model)
        model.load_state_dict(torch.load(args.model_path))
        model = model.module
    model = model.eval()

    batch0, label0 = next(iter(test_loader))
    batch0 = batch0[0].unsqueeze(0)
    print(batch0.shape)
    print(model)

    macs, params = get_model_complexity_info(model,
                                             batch0, (1024, 3),
                                             as_strings=True,
                                             print_per_layer_stat=False,
                                             verbose=True)

    print('{:<30}  {:<8}'.format('Computational complexity: ', macs))
    print('{:<30}  {:<8}'.format('Number of parameters: ', params))

    test_acc = 0.0
    count = 0.0
    test_true = []
    test_pred = []
    for data, label in test_loader:

        data, label = data.to(device), label.to(device).squeeze()
        batch_size = data.size()[0]
        if args.model in ('ognet', 'ognet-small', 'ssg', 'msg'):
            logits = model(data, data)
            #logits = model(1.1*data, 1.1*data)
        else:
            data = data.permute(0, 2, 1)
            logits = model(data)
        preds = logits.max(dim=1)[1]
        test_true.append(label.cpu().numpy())
        test_pred.append(preds.detach().cpu().numpy())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    test_acc = metrics.accuracy_score(test_true, test_pred)
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
    outstr = 'Test :: test acc: %.6f, test avg acc: %.6f' % (test_acc,
                                                             avg_per_class_acc)
    io.cprint(outstr)
Example #24
def main(args, print_fn=print):
    print_fn("Experiment arguments: {}".format(args))

    if args.random_seed:
        torch.manual_seed(args.random_seed)
    else:
        torch.manual_seed(123)
    # Load dataset
    if args.dataset.startswith('ogbl'):
        graph, split_edge = load_ogb_dataset(args.dataset)
    else:
        raise NotImplementedError

    num_nodes = graph.num_nodes()

    # set gpu
    if args.gpu_id >= 0 and torch.cuda.is_available():
        device = 'cuda:{}'.format(args.gpu_id)
    else:
        device = 'cpu'

    if args.dataset == 'ogbl-collab':
        # ogbl-collab dataset is multi-edge graph
        use_coalesce = True
    else:
        use_coalesce = False

    # Generate positive and negative edges and corresponding labels
    # Sampling subgraphs and generate node labeling features
    seal_data = SEALData(g=graph, split_edge=split_edge, hop=args.hop, neg_samples=args.neg_samples,
                         subsample_ratio=args.subsample_ratio, use_coalesce=use_coalesce, prefix=args.dataset,
                         save_dir=args.save_dir, num_workers=args.num_workers, print_fn=print_fn)
    node_attribute = seal_data.ndata['feat']
    edge_weight = seal_data.edata['weight'].float()

    train_data = seal_data('train')
    val_data = seal_data('valid')
    test_data = seal_data('test')

    train_graphs = len(train_data.graph_list)

    # Set data loader

    train_loader = GraphDataLoader(train_data, batch_size=args.batch_size, num_workers=args.num_workers)
    val_loader = GraphDataLoader(val_data, batch_size=args.batch_size, num_workers=args.num_workers)
    test_loader = GraphDataLoader(test_data, batch_size=args.batch_size, num_workers=args.num_workers)

    # set model
    if args.model == 'gcn':
        model = GCN(num_layers=args.num_layers,
                    hidden_units=args.hidden_units,
                    gcn_type=args.gcn_type,
                    pooling_type=args.pooling,
                    node_attributes=node_attribute,
                    edge_weights=edge_weight,
                    node_embedding=None,
                    use_embedding=True,
                    num_nodes=num_nodes,
                    dropout=args.dropout)
    elif args.model == 'dgcnn':
        model = DGCNN(num_layers=args.num_layers,
                      hidden_units=args.hidden_units,
                      k=args.sort_k,
                      gcn_type=args.gcn_type,
                      node_attributes=node_attribute,
                      edge_weights=edge_weight,
                      node_embedding=None,
                      use_embedding=True,
                      num_nodes=num_nodes,
                      dropout=args.dropout)
    else:
        raise ValueError('Model error')

    model = model.to(device)
    parameters = model.parameters()
    optimizer = torch.optim.Adam(parameters, lr=args.lr)
    loss_fn = BCEWithLogitsLoss()
    print_fn("Total parameters: {}".format(sum([p.numel() for p in model.parameters()])))

    # train and evaluate loop
    summary_val = []
    summary_test = []
    for epoch in range(args.epochs):
        start_time = time.time()
        loss = train(model=model,
                     dataloader=train_loader,
                     loss_fn=loss_fn,
                     optimizer=optimizer,
                     device=device,
                     num_graphs=args.batch_size,
                     total_graphs=train_graphs)
        train_time = time.time()
        if epoch % args.eval_steps == 0:
            val_pos_pred, val_neg_pred = evaluate(model=model,
                                                  dataloader=val_loader,
                                                  device=device)
            test_pos_pred, test_neg_pred = evaluate(model=model,
                                                    dataloader=test_loader,
                                                    device=device)

            val_metric = evaluate_hits(args.dataset, val_pos_pred, val_neg_pred, args.hits_k)
            test_metric = evaluate_hits(args.dataset, test_pos_pred, test_neg_pred, args.hits_k)
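            # hits@K = fraction of positive edges ranked above the K-th
            # highest-scoring negative edge (the standard OGB link metric)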
            evaluate_time = time.time()
            print_fn("Epoch-{}, train loss: {:.4f}, hits@{}: val-{:.4f}, test-{:.4f}, "
                     "cost time: train-{:.1f}s, total-{:.1f}s".format(epoch, loss, args.hits_k, val_metric, test_metric,
                                                                      train_time - start_time,
                                                                      evaluate_time - start_time))
            summary_val.append(val_metric)
            summary_test.append(test_metric)

    summary_test = np.array(summary_test)

    print_fn("Experiment Results:")
    # one entry is appended every eval_steps epochs, so convert the argmax
    # index back into an epoch number
    best_idx = int(np.argmax(summary_test))
    print_fn("Best hits@{}: {:.4f}, epoch: {}".format(args.hits_k, np.max(summary_test),
                                                      best_idx * args.eval_steps))
Example #25
0
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train',
                                         num_points=args.num_points),
                              num_workers=8,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             num_workers=8,
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    if args.model == 'pointnet':
        model = PointNet(args).to(device)
    elif args.model == 'dgcnn':
        model = DGCNN(args).to(device)
    elif args.model == 'ssg':
        model = PointNet2SSG(output_classes=40, dropout_prob=args.dropout)
        model.to(device)
    elif args.model == 'msg':
        model = PointNet2MSG(output_classes=40, dropout_prob=args.dropout)
        model.to(device)
    elif args.model == 'ognet':
        # [64,128,256,512]
        if args.efficient:
            model = ModelE_dense(20,
                                 args.feature_dims, [512],
                                 output_classes=40,
                                 init_points=768,
                                 input_dims=3,
                                 dropout_prob=args.dropout,
                                 id_skip=args.id_skip,
                                 drop_connect_rate=args.drop_connect_rate,
                                 cluster='xyzrgb',
                                 pre_act=args.pre_act,
                                 norm=args.norm_layer,
                                 gem=args.gem,
                                 ASPP=args.ASPP)
        else:
            model = Model_dense(20,
                                args.feature_dims, [512],
                                output_classes=40,
                                init_points=768,
                                input_dims=3,
                                dropout_prob=args.dropout,
                                id_skip=args.id_skip,
                                drop_connect_rate=args.drop_connect_rate,
                                cluster='xyzrgb',
                                pre_act=args.pre_act,
                                norm=args.norm_layer)
        model.to(device)
    elif args.model == 'ognet-small':
        # [48,96,192,384]
        model = Model_dense(20,
                            args.feature_dims, [512],
                            output_classes=40,
                            init_points=768,
                            input_dims=3,
                            dropout_prob=args.dropout,
                            id_skip=args.id_skip,
                            drop_connect_rate=args.drop_connect_rate,
                            cluster='xyzrgb',
                            pre_act=args.pre_act,
                            norm=args.norm_layer)
        model.to(device)
    else:
        raise Exception("Not implemented")
    print(str(model))

    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), "GPUs!")

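    # Optimizer recipe (as in the DGCNN reference code): SGD starts at 100x the
    # base LR and is cosine-annealed down to the base LR; Adam starts at the
    # base LR and is annealed to 1% of it.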
    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum,
                        weight_decay=1e-4)
        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
        scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=0.01 * args.lr)

    criterion = cal_loss

    best_test_acc = 0
    best_avg_per_class_acc = 0

    warm_up = 0.1  # linear warm-up: the loss is scaled from 0.1 up to 1.0
    warm_iteration = round(
        len(ModelNet40(partition='train', num_points=args.num_points)) /
        args.batch_size) * args.warm_epoch  # ramp over the first args.warm_epoch epochs
    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            batch_size = data.size()[0]
            opt.zero_grad()
            if args.model in ('ognet', 'ognet-small', 'ssg', 'msg'):
                logits = model(data, data)
            else:
                data = data.permute(0, 2, 1)
                logits = model(data)
            loss = criterion(logits, label)
            if epoch < args.warm_epoch:
                warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
                loss *= warm_up
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = 'Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (
            epoch, train_loss * 1.0 / count,
            metrics.accuracy_score(train_true, train_pred),
            metrics.balanced_accuracy_score(train_true, train_pred))
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            batch_size = data.size()[0]
            if args.model in ('ognet', 'ognet-small', 'ssg', 'msg'):
                logits = model(data, data)
            else:
                data = data.permute(0, 2, 1)
                logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(
            test_true, test_pred)
        outstr = 'Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (
            epoch, test_loss * 1.0 / count, test_acc, avg_per_class_acc)
        io.cprint(outstr)
        if test_acc + avg_per_class_acc >= best_test_acc + best_avg_per_class_acc:
            best_test_acc = test_acc
            best_avg_per_class_acc = avg_per_class_acc
            io.cprint('This is the current best.')
            torch.save(model.state_dict(),
                       'checkpoints/%s/models/model.t7' % args.exp_name)
        # step the cosine LR schedule once per epoch, after the optimizer updates
        scheduler.step()
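The warm-up at the top of this train() scales the loss rather than the learning rate: the factor starts at 0.1 and grows linearly to 1.0 over args.warm_epoch epochs' worth of iterations. A self-contained sketch of the same ramp (the 5-epoch warm-up, the 9843-sample ModelNet40 train split, and batch size 32 are illustrative assumptions, not values from the source):

warm_epoch = 5                       # assumed value of args.warm_epoch
iters_per_epoch = round(9843 / 32)   # ModelNet40 train size / assumed batch size
warm_iteration = iters_per_epoch * warm_epoch
warm_up = 0.1
for _ in range(warm_iteration):
    warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
    # inside the training loop, the loss is multiplied by warm_up here
print(warm_up)  # ~1.0 once the ramp completes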
Example #26
0
    # (the start of this example is truncated in the source; the flag below is
    # reconstructed from the args.local_rank usage further down)
    parser.add_argument('--local_rank', type=int, default=0,
                        help='node rank for distributed training')
    args = parser.parse_args()
    BATCH_SIZE = BATCH_SIZE[args.num_points]
    BEST_WEIGHTS = BEST_WEIGHTS[args.dataset][args.num_points]
    if args.batch_size == -1:
        args.batch_size = BATCH_SIZE[args.model]
    set_seed(1)
    print(args)

    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(args.local_rank)
    cudnn.benchmark = True

    # build model
    if args.model.lower() == 'dgcnn':
        model = DGCNN(args.emb_dims, args.k, output_channels=40)
    elif args.model.lower() == 'pointnet':
        model = PointNetCls(k=40, feature_transform=args.feature_transform)
    elif args.model.lower() == 'pointnet2':
        model = PointNet2ClsSsg(num_classes=40)
    elif args.model.lower() == 'pointconv':
        model = PointConvDensityClsSsg(num_classes=40)
    else:
        print('Model not recognized')
        exit(-1)

    # load model weight
    state_dict = torch.load(
        BEST_WEIGHTS[args.model], map_location='cpu')
    print('Loading weight {}'.format(BEST_WEIGHTS[args.model]))
    try:
        model.load_state_dict(state_dict)
    except RuntimeError:
        # assumed completion (the example is cut off here): checkpoints saved
        # from an nn.DataParallel wrapper carry a 'module.' key prefix that
        # must be stripped before loading into a bare model
        state_dict = {k.replace('module.', '', 1): v
                      for k, v in state_dict.items()}
        model.load_state_dict(state_dict)
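A --local_rank flag together with dist.init_process_group(backend='nccl') is the classic single-node pattern for PyTorch's launcher, so this script is presumably started along the lines of (the exact command is an assumption, not shown in the source):

python -m torch.distributed.launch --nproc_per_node=4 main.py --model dgcnn --num_points 1024

The launcher spawns one process per GPU and passes each process its --local_rank.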
Example #27
0
File: main.py Project: ajlee19/gdp
def train(args, io):
    train_loader = DataLoader(ModelNet40(partition='train', num_points=1024),
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=1024),
                             batch_size=args.test_batch_size,
                             shuffle=True,
                             drop_last=False)

    device = torch.device("cuda" if args.cuda else "cpu")

    #Try to load models
    model = DGCNN().to(device)

    if args.use_sgd:
        print("Use SGD")
        opt = optim.SGD(model.parameters(),
                        lr=args.lr * 100,
                        momentum=args.momentum)
    else:
        print("Use Adam")
        opt = optim.Adam(model.parameters(), lr=args.lr)

    scheduler = MultiStepLR(opt, milestones=[100, 150], gamma=0.1)
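    # (milestones: the learning rate is multiplied by 0.1 at epochs 100 and 150)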

    best_test_acc = 0
    # per-epoch histories, reported after training
    train_losslst, train_acclst = [], []
    test_losslst, test_acclst = [], []
    for epoch in range(args.epochs):
        ####################
        # Train
        ####################
        train_loss = 0.0
        train_acc = 0.0
        count = 0.0
        model.train()
        for data, label in train_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            opt.zero_grad()
            logits = model(data)
            loss = F.cross_entropy(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_acc += (preds == label).sum().item()
        outstr = 'Train %d, loss: %.6f, acc: %.6f' % (
            epoch, train_loss * 1.0 / count, train_acc * 1.0 / count)
        train_losslst.append(train_loss * 1.0 / count)
        train_acclst.append(train_acc * 1.0 / count)
        io.cprint(outstr)

        ####################
        # Test
        ####################
        test_loss = 0.0
        test_acc = 0.0
        count = 0.0
        model.eval()
        with torch.no_grad():
            for data, label in test_loader:
                data, label = data.to(device), label.to(device).squeeze()
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                logits = model(data)
                # compute the test loss explicitly (the original reused the
                # last training-batch loss here by mistake)
                loss = F.cross_entropy(logits, label)
                preds = logits.max(dim=1)[1]
                count += batch_size
                test_loss += loss.item() * batch_size
                test_acc += (preds == label).sum().item()
        outstr = 'Test %d, loss: %.6f, acc: %.6f' % (
            epoch, test_loss * 1.0 / count, test_acc * 1.0 / count)
        test_losslst.append(test_loss * 1.0 / count)
        test_acclst.append(test_acc * 1.0 / count)
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model, 'checkpoints/%s/models/model.t7' % args.exp_name)

        io.cprint("Averate testing accuracy: " +
                  str(sum(test_acclst) / len(test_acclst)) +
                  '----------------------\n')
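The loss and accuracy histories this train() accumulates are presumably meant for plotting. A minimal consumer might look like the sketch below; matplotlib is an assumption here (it is not imported anywhere in the source), and plot_curves is a hypothetical helper name:

import matplotlib.pyplot as plt

def plot_curves(train_losslst, test_losslst, train_acclst, test_acclst):
    # one entry per epoch in each list
    epochs = range(len(train_losslst))
    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))
    ax_loss.plot(epochs, train_losslst, label='train')
    ax_loss.plot(epochs, test_losslst, label='test')
    ax_loss.set_xlabel('epoch')
    ax_loss.set_ylabel('cross-entropy loss')
    ax_loss.legend()
    ax_acc.plot(epochs, train_acclst, label='train')
    ax_acc.plot(epochs, test_acclst, label='test')
    ax_acc.set_xlabel('epoch')
    ax_acc.set_ylabel('accuracy')
    ax_acc.legend()
    fig.savefig('training_curves.png')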