def main():
    """Train a point-cloud classifier (PointNet or DGCNN) on ModelNet.

    All hyperparameters are read from the module-level ``config`` object.
    Builds train/test dataloaders, instantiates the model named in
    ``config.TRAIN['model']``, and runs the epoch loop via the external
    ``train()`` / ``valid()`` helpers.
    """
    # Restrict this process to GPU 1 before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    print("Load dataset...")
    DATA_PATH = config.DATA['root']

    # Train/test splits with the configured per-cloud point count and
    # optional surface-normal channels.
    TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                       npoint=config.SETTING['num_point'],
                                       split='train',
                                       normal_channel=config.SETTING['normal'])
    TEST_DATASET = ModelNetDataLoader(root=DATA_PATH,
                                      npoint=config.SETTING['num_point'],
                                      split='test',
                                      normal_channel=config.SETTING['normal'])
    trainDataLoader = torch.utils.data.DataLoader(
        TRAIN_DATASET,
        batch_size=config.TRAIN['batch_size'],
        shuffle=True,
        num_workers=4)
    # Test loader is not shuffled so evaluation order is deterministic.
    testDataLoader = torch.utils.data.DataLoader(
        TEST_DATASET,
        batch_size=config.TRAIN['batch_size'],
        shuffle=False,
        num_workers=4)
    print("ok!")

    print("Check device...")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    print("ok!")

    print("Load model...")
    # NOTE(review): there is no else-branch — an unrecognized
    # config.TRAIN['model'] leaves `model` unbound and raises NameError
    # at model.to(device) below.
    if config.TRAIN['model'] == "POINTNET":
        model = PointNet(config.DATA['num_class'],
                         normal_channel=config.SETTING['normal'])
    elif config.TRAIN['model'] == "DGCNN":
        model = DGCNN(config.DATA['num_class'])
    model = model.to(device)
    print("ok!")

    print("Load optimizer...")
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=config.TRAIN['learning_rate'],
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=config.TRAIN['decay_rate'])
    print("ok!")

    # Starting training
    print("Starting training... ")

    # NOTE(review): `best` is initialized but never updated in the visible
    # code, and `acc` from valid() is unused — best-model checkpointing
    # appears truncated here; confirm against the full source.
    best = 1

    for epoch in range(config.TRAIN['epoch']):
        train(epoch, trainDataLoader, optimizer, model, device)
        acc = valid(testDataLoader, model, device)
Пример #2
0
        pred = torch.argmax(pred, dim=-1)
        pred, y = pred.cpu().detach().numpy(), batch.y.detach().numpy()
        correct.append(np.mean(pred == y))
    return np.mean(correct)


# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device(
    'cpu')

# ModelNet10: normalize each mesh to unit scale, then sample 1024 points
# per shape on the fly at load time.
pre_transform, transform = T.NormalizeScale(), T.SamplePoints(1024)
dataset = ModelNet('./data/model-net', '10', True, transform, pre_transform)
dataloader = DataLoader(dataset, num_workers=2, batch_size=16, shuffle=True)

# Learning-rate sweep scaffold: currently a single-element tuple, but the
# loop/dict structure supports adding more rates; losses are collected
# per learning rate for plotting below.
loss_dict = {}
for lr in (5e-3, ):
    model = PointNet(3, [64, 64], [128, 128, 128], 10).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    key = lr
    loss_dict[key] = []

    for epoch in range(5):
        # NOTE(review): train_epoch() and test() take no arguments —
        # presumably they read model/optimizer/criterion/dataloader from
        # module globals; confirm against their definitions.
        loss = train_epoch()
        loss_dict[key].append(loss)
        acc = test()
        print(f'Key {key} epoch {epoch}, loss {loss: 0.2f}, acc {acc: 0.2f}')
    print()
plt.figure()
for k in loss_dict.keys():
Пример #3
0
def main():
    """Identify the largest cluster of solid-like atoms per trajectory frame.

    Uses MDAnalysis to read a GROMACS trajectory, classifies each atom's
    local environment with a trained PointNet, and then finds the largest
    connected cluster of non-liquid atoms with a distance-based graph.

    Required command-line arguments:
      --weights   folder containing network weights to use
      --nclass    number of classes
      --trjpath   path to gro/xtc files (extension added automatically)
      --cutoff    neighbor cutoff distance (in nm)
      --maxneigh  max number of neighbors
      --outname   output file name prefix

    Writes ``<outname>_summary.mda`` (per-frame counts) and
    ``<outname>_class.mda`` (per-atom class within the largest cluster).
    """
    # Parse arguments
    parser = argparse.ArgumentParser(
        description=
        'Uses MDAnalysis and PointNet to identify largest cluster of solid-like atoms'
    )
    parser.add_argument('--weights',
                        help='folder containing network weights to use',
                        type=str,
                        required=True)
    parser.add_argument('--nclass',
                        help='number of classes',
                        type=int,
                        required=True)
    parser.add_argument('--trjpath',
                        help='path to gro/xtc files',
                        type=str,
                        required=True)
    parser.add_argument('--cutoff',
                        help='neighbor cutoff distance (in nm)',
                        type=float,
                        required=True)
    parser.add_argument('--maxneigh',
                        help='max number of neighbors',
                        type=int,
                        required=True)
    parser.add_argument('--outname',
                        help='output file name',
                        type=str,
                        required=True)

    args = parser.parse_args()

    # Import topology
    u = mda.Universe(args.trjpath + '.gro', args.trjpath + '.xtc')

    # Context managers guarantee the output files are flushed and closed
    # even if inference or clustering raises mid-trajectory.
    with open(args.outname + '_summary.mda', 'w') as f_summary, \
         open(args.outname + '_class.mda', 'w') as f_class:

        f_summary.write(
            "# Time, Largest_cls_solid, n_liq, n_fcc, n_hcp, n_bcc, n_solid\n")

        # Here is where we initialize the pointnet
        pointnet = PointNet(n_points=args.maxneigh,
                            n_classes=args.nclass,
                            weights_dir=args.weights)

        # Loop over trajectory
        for ts in u.trajectory:
            # Generate neighbor list (cutoff converted nm -> Angstrom)
            print("Generating neighbor list")
            nlist = nsgrid.FastNS(args.cutoff * 10.0, u.atoms.positions,
                                  ts.dimensions).self_search()

            # Extract required info
            ndxs = nlist.get_indices()
            dxs = nlist.get_dx()
            dists = nlist.get_distances()
            print("Extracted all relevant information")

            samples = []
            # Prepare samples to send through pointnet
            for i in range(len(dxs)):
                ## Sort neighbors by distance (so that we can
                ## normalize all distances such that the distance
                ## to the closest neighbor is 1.0 )
                nneigh = int(len(dxs[i]) / 3)
                np_dxs = np.asarray(dxs[i]).reshape([nneigh, 3])
                sort_order = np.asarray(dists[i]).argsort()
                # Sort neighbors by distance
                np_dxs = np_dxs[sort_order]
                if nneigh > 0:
                    # NOTE(review): assumes the closest neighbor is never
                    # exactly at the origin (zero norm) — confirm upstream.
                    np_dxs /= np.linalg.norm(np_dxs[0])
                # Now correctly size/pad the point cloud: zero-pad up to
                # maxneigh, or truncate the farthest neighbors beyond it.
                if nneigh < args.maxneigh:
                    np_dxs = np.pad(
                        np_dxs,
                        [(0, args.maxneigh - nneigh), (0, 0)],
                        'constant',
                    )
                elif nneigh > args.maxneigh:
                    np_dxs = np_dxs[:args.maxneigh]

                # Append sample info
                samples.append(np_dxs)

            # And convert to np array
            np_samples = np.asarray(samples)
            print("Frame {}, Shape sent to pointnet: {}".format(
                ts.frame, np_samples.shape))
            sys.stdout.flush()

            # Send sample through inference
            results = pointnet.infer_nolabel(np_samples)
            results = np.asarray(results)

            print("Frame {}, Results returned from pointnet, shape {}".format(
                ts.frame, results.shape))
            sys.stdout.flush()

            # Extract different atom types (class 0 = liquid, >0 = solid)
            liquid_atoms = np.where(results == 0)[0]
            fcc_atoms = np.where(results == 1)[0]
            hcp_atoms = np.where(results == 2)[0]
            bcc_atoms = np.where(results == 3)[0]
            solid_atoms = np.where(results > 0)[0]
            print("%d total solid atoms" % solid_atoms.shape[0])

            ## Now we are going to construct the largest cluster of
            ## solid atoms in the system (i.e., a solid nucleus)

            # We need neighbor lists for connectivity cutoff
            # Using 15.0 Angstroms (mda units) here
            nlist = nsgrid.FastNS(15.0, u.atoms.positions,
                                  ts.dimensions).self_search()

            pairs = nlist.get_pairs()

            # Find the largest cluster of solids (not liquid)
            G = nx.Graph()
            G.add_edges_from(pairs)
            G.remove_nodes_from(liquid_atoms)
            # nx.connected_component_subgraphs() was removed in NetworkX 2.4;
            # connected_components() yields node sets, which support len()
            # and iteration exactly as this code needs. default=set() keeps
            # an all-liquid frame from raising ValueError on an empty graph.
            largest_cluster = max(nx.connected_components(G), key=len,
                                  default=set())

            f_summary.write("{:8.3f}{:8d}{:8d}{:8d}{:8d}{:8d}{:8d}\n".format(
                ts.time, len(largest_cluster), liquid_atoms.shape[0],
                fcc_atoms.shape[0], hcp_atoms.shape[0], bcc_atoms.shape[0],
                solid_atoms.shape[0]))

            for node in largest_cluster:
                f_class.write("{:10d}{:8d}{:8d}\n".format(ts.frame + 1, node,
                                                          results[node]))
Пример #4
0
def main(config):
    """Train an adversarial autoencoder (AAE) on point clouds.

    The model consists of an encoder (PointNet or a custom one), a
    hypernetwork that generates per-sample target-network weights for
    reconstruction, and a discriminator that matches the latent codes to
    a Gaussian prior (WGAN-GP by default).

    config: dict-like with dataset, model, optimizer and bookkeeping
    settings (see the key accesses below for the expected schema).

    Side effects: creates/reuses a results directory, writes config.json,
    logs progress, and periodically saves sample renders, model weights
    and per-epoch loss arrays.
    """
    set_seed(config['seed'])

    results_dir = prepare_results_dir(config, 'aae', 'training')
    # Resume from the epoch after the last saved one (1 on a fresh run).
    starting_epoch = find_latest_epoch(results_dir) + 1

    if not exists(join(results_dir, 'config.json')):
        with open(join(results_dir, 'config.json'), mode='w') as f:
            json.dump(config, f)

    setup_logging(results_dir)
    log = logging.getLogger('aae')

    device = cuda_setup(config['cuda'], config['gpu'])
    log.info(f'Device variable: {device}')
    if device.type == 'cuda':
        log.info(f'Current CUDA device: {torch.cuda.current_device()}')

    weights_path = join(results_dir, 'weights')
    metrics_path = join(results_dir, 'metrics')

    #
    # Dataset
    #
    dataset_name = config['dataset'].lower()
    if dataset_name == 'shapenet':
        from datasets.shapenet import ShapeNetDataset
        dataset = ShapeNetDataset(root_dir=config['data_dir'],
                                  classes=config['classes'])
    else:
        raise ValueError(f'Invalid dataset name. Expected `shapenet` or '
                         f'`faust`. Got: `{dataset_name}`')

    log.info("Selected {} classes. Loaded {} samples.".format(
        'all' if not config['classes'] else ','.join(config['classes']),
        len(dataset)))

    points_dataloader = DataLoader(dataset, batch_size=config['batch_size'],
                                   shuffle=config['shuffle'],
                                   num_workers=config['num_workers'],
                                   pin_memory=True)

    # When True, use the PointNet encoder (different loss plumbing below).
    pointnet = config.get('pointnet', False)
    #
    # Models
    #
    hyper_network = aae.HyperNetwork(config, device).to(device)

    if pointnet:
        from models.pointnet import PointNet
        encoder = PointNet(config).to(device)
        # PointNet initializes it's own weights during instance creation
    else:
        encoder = aae.Encoder(config).to(device)
        encoder.apply(weights_init)

    discriminator = aae.Discriminator(config).to(device)

    hyper_network.apply(weights_init)
    discriminator.apply(weights_init)

    if config['reconstruction_loss'].lower() == 'chamfer':
        if pointnet:
            from utils.metrics import chamfer_distance
            reconstruction_loss = chamfer_distance
        else:
            from losses.champfer_loss import ChamferLoss
            reconstruction_loss = ChamferLoss().to(device)
    elif config['reconstruction_loss'].lower() == 'earth_mover':
        from utils.metrics import earth_mover_distance
        reconstruction_loss = earth_mover_distance
    else:
        raise ValueError(f'Invalid reconstruction loss. Accepted `chamfer` or '
                         f'`earth_mover`, got: {config["reconstruction_loss"]}')

    #
    # Optimizers
    #
    # Encoder and hypernetwork share one optimizer ("E_HN"); the
    # discriminator has its own ("D"). Types and hyperparams from config.
    e_hn_optimizer = getattr(optim, config['optimizer']['E_HN']['type'])
    e_hn_optimizer = e_hn_optimizer(chain(encoder.parameters(), hyper_network.parameters()),
                                    **config['optimizer']['E_HN']['hyperparams'])

    discriminator_optimizer = getattr(optim, config['optimizer']['D']['type'])
    discriminator_optimizer = discriminator_optimizer(discriminator.parameters(),
                                                      **config['optimizer']['D']['hyperparams'])

    log.info("Starting epoch: %s" % starting_epoch)
    if starting_epoch > 1:
        # Resume: restore model/optimizer state and loss histories from
        # the checkpoints of the previous epoch.
        log.info("Loading weights...")
        hyper_network.load_state_dict(torch.load(
            join(weights_path, f'{starting_epoch - 1:05}_G.pth')))
        encoder.load_state_dict(torch.load(
            join(weights_path, f'{starting_epoch - 1:05}_E.pth')))
        discriminator.load_state_dict(torch.load(
            join(weights_path, f'{starting_epoch-1:05}_D.pth')))

        e_hn_optimizer.load_state_dict(torch.load(
            join(weights_path, f'{starting_epoch - 1:05}_EGo.pth')))

        discriminator_optimizer.load_state_dict(torch.load(
            join(weights_path, f'{starting_epoch-1:05}_Do.pth')))

        log.info("Loading losses...")
        losses_e = np.load(join(metrics_path, f'{starting_epoch - 1:05}_E.npy')).tolist()
        losses_g = np.load(join(metrics_path, f'{starting_epoch - 1:05}_G.npy')).tolist()
        losses_eg = np.load(join(metrics_path, f'{starting_epoch - 1:05}_EG.npy')).tolist()
        losses_d = np.load(join(metrics_path, f'{starting_epoch - 1:05}_D.npy')).tolist()
    else:
        log.info("First epoch")
        losses_e = []
        losses_g = []
        losses_eg = []
        losses_d = []

    normalize_points = config['target_network_input']['normalization']['enable']
    if normalize_points:
        normalization_type = config['target_network_input']['normalization']['type']
        assert normalization_type == 'progressive', 'Invalid normalization type'

    target_network_input = None
    for epoch in range(starting_epoch, config['max_epochs'] + 1):
        start_epoch_time = datetime.now()
        log.debug("Epoch: %s" % epoch)
        hyper_network.train()
        encoder.train()
        discriminator.train()

        total_loss_all = 0.0
        total_loss_reconstruction = 0.0
        total_loss_encoder = 0.0
        total_loss_discriminator = 0.0
        total_loss_regularization = 0.0
        # Enumerate from 1 so `i` doubles as the batch count when the
        # per-epoch averages are logged after the loop.
        for i, point_data in enumerate(points_dataloader, 1):

            X, _ = point_data
            X = X.to(device)

            # Change dim [BATCH, N_POINTS, N_DIM] -> [BATCH, N_DIM, N_POINTS]
            if X.size(-1) == 3:
                X.transpose_(X.dim() - 2, X.dim() - 1)

            if pointnet:
                _, feature_transform, codes = encoder(X)
            else:
                codes, _, _ = encoder(X)

            # discriminator training
            noise = torch.empty(codes.shape[0], config['z_size']).normal_(mean=config['normal_mu'],
                                                                          std=config['normal_std']).to(device)
            synth_logit = discriminator(codes)
            real_logit = discriminator(noise)
            if config.get('wasserstein', True):
                # WGAN critic loss plus gradient penalty (WGAN-GP).
                loss_discriminator = torch.mean(synth_logit) - torch.mean(real_logit)

                alpha = torch.rand(codes.shape[0], 1).to(device)
                differences = codes - noise
                interpolates = noise + alpha * differences
                disc_interpolates = discriminator(interpolates)

                # gradient_penalty_function
                gradients = grad(
                    outputs=disc_interpolates,
                    inputs=interpolates,
                    grad_outputs=torch.ones_like(disc_interpolates).to(device),
                    create_graph=True,
                    retain_graph=True,
                    only_inputs=True)[0]
                slopes = torch.sqrt(torch.sum(gradients ** 2, dim=1))
                gradient_penalty = ((slopes - 1) ** 2).mean()
                loss_gp = config['gradient_penalty_coef'] * gradient_penalty
                loss_discriminator += loss_gp
            else:
                # An alternative is a = -1, b = 1 iff c = 0
                a = 0.0
                b = 1.0
                # NOTE(review): this loss is an element-wise tensor, not a
                # scalar — backward() below raises on non-scalar tensors,
                # so only the wasserstein branch appears exercised; confirm.
                loss_discriminator = 0.5 * ((real_logit - b)**2 + (synth_logit - a)**2)

            discriminator_optimizer.zero_grad()
            discriminator.zero_grad()

            # retain_graph: `codes` is reused for the encoder loss below.
            loss_discriminator.backward(retain_graph=True)
            total_loss_discriminator += loss_discriminator.item()
            discriminator_optimizer.step()

            # hyper network training
            target_networks_weights = hyper_network(codes)

            # Reconstruct each cloud with its own generated target network.
            X_rec = torch.zeros(X.shape).to(device)
            for j, target_network_weights in enumerate(target_networks_weights):
                target_network = aae.TargetNetwork(config, target_network_weights).to(device)

                if not config['target_network_input']['constant'] or target_network_input is None:
                    target_network_input = generate_points(config=config, epoch=epoch, size=(X.shape[2], X.shape[1]))

                X_rec[j] = torch.transpose(target_network(target_network_input.to(device)), 0, 1)

            if pointnet:
                loss_reconstruction = config['reconstruction_coef'] * \
                                      reconstruction_loss(torch.transpose(X, 1, 2).contiguous(),
                                                          torch.transpose(X_rec, 1, 2).contiguous(),
                                                          batch_size=X.shape[0]).mean()
            else:
                loss_reconstruction = torch.mean(
                    config['reconstruction_coef'] *
                    reconstruction_loss(X.permute(0, 2, 1) + 0.5,
                                        X_rec.permute(0, 2, 1) + 0.5))

            # encoder training
            synth_logit = discriminator(codes)
            if config.get('wasserstein', True):
                loss_encoder = -torch.mean(synth_logit)
            else:
                # An alternative is c = 0 iff a = -1, b = 1
                c = 1.0
                # NOTE(review): non-scalar, same caveat as the
                # discriminator's non-wasserstein branch above.
                loss_encoder = 0.5 * (synth_logit - c)**2

            if pointnet:
                regularization_loss = config['feature_regularization_coef'] * \
                                      feature_transform_regularization(feature_transform).mean()
                loss_all = loss_reconstruction + loss_encoder + regularization_loss
            else:
                loss_all = loss_reconstruction + loss_encoder

            e_hn_optimizer.zero_grad()
            encoder.zero_grad()
            hyper_network.zero_grad()

            loss_all.backward()
            e_hn_optimizer.step()

            total_loss_reconstruction += loss_reconstruction.item()
            total_loss_encoder += loss_encoder.item()
            total_loss_all += loss_all.item()

            if pointnet:
                total_loss_regularization += regularization_loss.item()

        log.info(
            f'[{epoch}/{config["max_epochs"]}] '
            f'Total_Loss: {total_loss_all / i:.4f} '
            f'Loss_R: {total_loss_reconstruction / i:.4f} '
            f'Loss_E: {total_loss_encoder / i:.4f} '
            f'Loss_D: {total_loss_discriminator / i:.4f} '
            f'Time: {datetime.now() - start_epoch_time}'
        )

        if pointnet:
            log.info(f'Loss_Regularization: {total_loss_regularization / i:.4f}')

        losses_e.append(total_loss_reconstruction)
        losses_g.append(total_loss_encoder)
        losses_eg.append(total_loss_all)
        losses_d.append(total_loss_discriminator)

        #
        # Save intermediate results
        #
        if epoch % config['save_samples_frequency'] == 0:
            log.debug('Saving samples...')

            # Render up to 5 reconstructed/real pairs from the last batch.
            X = X.cpu().numpy()
            X_rec = X_rec.detach().cpu().numpy()

            for k in range(min(5, X_rec.shape[0])):
                fig = plot_3d_point_cloud(X_rec[k][0], X_rec[k][1], X_rec[k][2], in_u_sphere=True, show=False,
                                          title=str(epoch))
                fig.savefig(join(results_dir, 'samples', f'{epoch}_{k}_reconstructed.png'))
                plt.close(fig)

                fig = plot_3d_point_cloud(X[k][0], X[k][1], X[k][2], in_u_sphere=True, show=False)
                fig.savefig(join(results_dir, 'samples', f'{epoch}_{k}_real.png'))
                plt.close(fig)

        if config['clean_weights_dir']:
            log.debug('Cleaning weights path: %s' % weights_path)
            shutil.rmtree(weights_path, ignore_errors=True)
            os.makedirs(weights_path, exist_ok=True)

        if epoch % config['save_weights_frequency'] == 0:
            log.debug('Saving weights and losses...')

            torch.save(hyper_network.state_dict(), join(weights_path, f'{epoch:05}_G.pth'))
            torch.save(encoder.state_dict(), join(weights_path, f'{epoch:05}_E.pth'))
            torch.save(e_hn_optimizer.state_dict(), join(weights_path, f'{epoch:05}_EGo.pth'))
            torch.save(discriminator.state_dict(), join(weights_path, f'{epoch:05}_D.pth'))
            torch.save(discriminator_optimizer.state_dict(), join(weights_path, f'{epoch:05}_Do.pth'))

            np.save(join(metrics_path, f'{epoch:05}_E'), np.array(losses_e))
            np.save(join(metrics_path, f'{epoch:05}_G'), np.array(losses_g))
            np.save(join(metrics_path, f'{epoch:05}_EG'), np.array(losses_eg))
            np.save(join(metrics_path, f'{epoch:05}_D'), np.array(losses_d))
Пример #5
0
				  batch_size=64,
				  n_input=dc.train.data.shape[-1],
				  verbose=1)
	
	if args.net == 'cnn':
		net = CNN(epochs=args.nepochs,
				  batch_size=64,
				  n_input=dc.train.data.shape[-1],
				  verbose=1)

	if args.net == 'pc':
		net = PointNet(epochs=args.nepochs,
					   batch_size=64,
					   n_points=dc.train.data.shape[1],
					   n_classes=dc.train.labels.shape[-1],
					   n_input=dc.train.data.shape[-1], 
					   verbose=1,
					   save=1,
					   noise=args.noise,
					   params=[args.p1, args.p2],
                       weights_dir=args.weights)


	print('train shape: ' + str(dc.train.data.shape))
	print('label shape: ' + str(dc.train.labels.shape))
	print('test shape: ' + str(dc.test.data.shape))
	print('label shape: ' + str(dc.test.labels.shape))

	acc = net.run(dc)

	print('final accuracy: ' + str(acc))
Пример #6
0
def main():
    """Train PointNet on ModelNet40, magnitude-prune its weights, and
    fine-tune while keeping the pruned connections zeroed out.

    Saves the dense pre-pruning weights to ./results/ and the pruned
    network in compressed form via save_compressed_weights().
    """
    num_classes = 40
    train_path = './ModelNet40/ply_data_train.h5'
    test_path = './ModelNet40/ply_data_test.h5'

    pretrain_epochs = 80
    fine_tune_epochs = 20
    batch_size = 32

    train_gen = DataGenerator(train_path, batch_size, num_classes, train=True)
    val_gen = DataGenerator(test_path, batch_size, num_classes, train=False)

    model = PointNet(num_classes)
    model.summary()
    model.compile(optimizer=Adam(lr=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if not os.path.exists('./results/'):
        os.mkdir('./results/')

    # Stage 1: pre-train the dense network with the external LR schedule.
    history = model.fit_generator(
        train_gen.generator(),
        steps_per_epoch=9840 // batch_size,
        epochs=pretrain_epochs,
        validation_data=val_gen.generator(),
        validation_steps=2468 // batch_size,
        callbacks=[LearningRateScheduler(schedule=schedule)],
        verbose=1)

    save_history(history, './results/')
    model.save_weights('./results/pointnet_weights.h5')

    # Stage 2: magnitude-prune every weighted layer except the first
    # convolution, keeping a binary mask per pruned layer (keyed by the
    # layer's index in model.layers).
    masks = {}
    first_weighted_seen = False
    for idx, layer in enumerate(model.layers):
        params = layer.get_weights()
        if len(params) < 2:
            continue
        if not first_weighted_seen:
            # The first convolution layer stays dense.
            first_weighted_seen = True
            continue
        pruned = deepcopy(params)
        kernel, mask = prune_weights(pruned[0],
                                     compress_rate=args.compress_rate)
        masks[idx] = mask
        pruned[0] = kernel
        layer.set_weights(pruned)

    # Evaluate right after pruning, before any fine-tuning.
    model.compile(optimizer=Adam(lr=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    score = model.evaluate_generator(val_gen.data_generator(),
                                     steps=2468 // batch_size)
    print('val loss: {}'.format(score[0]))
    print('val acc: {}'.format(score[1]))

    # Stage 3: fine-tune, re-applying the masks after every batch so the
    # pruned connections stay at zero.
    for _ in range(fine_tune_epochs):
        for x_batch, y_batch in train_gen.data_generator():
            model.train_on_batch(x_batch, y_batch)
            for idx, mask in masks.items():
                params = model.layers[idx].get_weights()
                params[0] = params[0] * mask
                model.layers[idx].set_weights(params)
        score = model.evaluate_generator(val_gen.data_generator(),
                                         steps=2468 // batch_size)
        print('val loss: {}'.format(score[0]))
        print('val acc: {}'.format(score[1]))

    # Persist the sparse network in compressed form.
    save_compressed_weights(model, './results/compressed_pointnet_weights')
Пример #7
0
    parser = argparse.ArgumentParser(
        description='Run inference on specified dataset')
    parser.add_argument('--weights',
                        help='folder containing network weights to use',
                        type=str,
                        required=True)
    parser.add_argument('--dataset',
                        help='dataset to be used (numpy format)',
                        type=str,
                        required=True)
    parser.add_argument('--labels',
                        help='labels corresponding to dataset (numpy format)',
                        type=str,
                        required=False)

    args = parser.parse_args()

    # Load data and labels --
    d = np.load(args.dataset)
    l = np.load(args.labels)

    ic = IC(data=d, labels=l)

    pc = PointNet(n_points=ic.test.data.shape[1],
                  n_classes=ic.test.labels.shape[-1],
                  n_input=3,
                  weights_dir=args.weights)

    acc = pc.inference(ic, conf_matrix=True)
    print('inference accuracy: ' + str(acc))
Пример #8
0
    if not os.path.exists(args.weights):
        print('creating weight directory ' + str(args.weights))
        os.makedirs(args.weights)

    print('loading numpy data...')
    data = np.load(args.dataset)
    labels = np.load(args.labels)

    print('converting to DataContainer format...')
    dc = DC(data=data, labels=labels)

    net = PointNet(epochs=args.nepochs,
                   batch_size=64,
                   lr=args.learning_rate,
                   n_points=dc.train.data.shape[1],
                   n_classes=dc.train.labels.shape[-1],
                   n_input=dc.train.data.shape[-1],
                   verbose=1,
                   save=1,
                   weights_dir=args.weights)

    print('train shape: ' + str(dc.train.data.shape))
    print('label shape: ' + str(dc.train.labels.shape))
    print('test shape: ' + str(dc.test.data.shape))
    print('label shape: ' + str(dc.test.labels.shape))

    acc = net.run(dc)

    print('final accuracy: ' + str(acc))
Пример #9
0
from torch.utils.data import DataLoader
from torch import nn, optim
from models.pointnet import PointNet
from dataloader import ModelNet40
import torch

BATCH_SIZE = 64
NUM_POINTS = 2048  # number of points per point cloud
NUM_LABELS = 40  # number of distinct label classes

# Use the first CUDA device when available, otherwise run on CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net = PointNet(NUM_POINTS, NUM_LABELS)
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)

# Per-epoch metric history (to be filled by the training loop).
loss_l = []
accurary_l = []

# NOTE(review): batch_size is hard-coded to 64 here instead of reusing
# BATCH_SIZE — confirm they are meant to stay in sync.
trainloader = DataLoader(ModelNet40("data/modelnet40_ply_hdf5_2048/"),
                         batch_size=64,
                         shuffle=True)

for epoch in range(2):
    running_loss = 0.0

    for i, data in enumerate(trainloader, 0):
        inputs, labels = data[0].to(device), data[1].to(device)
        optimizer.zero_grad()
        # NOTE(review): the loop body appears truncated here — no loss
        # computation, backward pass, or optimizer step is visible, and
        # running_loss is never updated. Confirm against the full source.
        outputs = net(inputs.view(-1, 3))
Пример #10
0
    parser.add_argument('--nclass', help='number of classes', type=int, required=True)

    args = parser.parse_args()

    # Load data and labels -- 
    d = np.load(args.dataset)
    # Extract just coords
    dataset = d[:,:,2:]
    print("Dataset shape:")
    print(dataset.shape)
    nsamples = dataset.shape[0]
    npoints = dataset.shape[1]
    # Extract just atom/frids
    ids = d[:,0,:2]

    pc = PointNet(n_points=npoints, n_classes=args.nclass, weights_dir=args.weights)

    result = pc.infer_nolabel(dataset)
    np_result = np.asarray(result)
    np_result = np_result.reshape((nsamples,1))
    print("Result shape:")
    print(np_result.shape)

    print("Ids shape:")
    print(ids.shape)

    final = np.hstack((ids,np_result))

    f = open("classification.out",'w')
    for i in range(len(final)):
        # Type '0' is liquid -- don't write this to keep classification.out