Example #1
# Assumed imports (the original snippet omits them); SEM_SEG_Model,
# UpdatedMeanIoU and config come from the surrounding project.
import numpy as np
import open3d as o3d
import tensorflow as tf
from scipy import io
from tensorflow import keras


def real_world_data_test():
    # Load a real-world crop saved as a MATLAB .mat file.
    matr = io.loadmat('testcloud_realcrop.mat')
    pc = matr['single_example']
    pc_model_format = np.expand_dims(pc, axis=0)
    ds = tf.data.Dataset.from_tensor_slices(
        (tf.convert_to_tensor(pc_model_format.astype(np.float32))))
    batchds = ds.batch(1, drop_remainder=True)

    model_nr = 120
    model = SEM_SEG_Model(config['batch_size'], config['num_classes'],
                          config['bn'])
    logdir = './logs/{}/model/weights'.format(config['log_dir'])
    model.build((config['batch_size'], 57600, 3))
    model.compile(optimizer=keras.optimizers.Adam(config['lr']),
                  loss=keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[
                      keras.metrics.SparseCategoricalAccuracy(),
                      UpdatedMeanIoU(num_classes=config['num_classes'])
                  ])
    save_string = logdir + "/saved-model-{}".format(model_nr)
    model.load_weights(save_string)

    pred = model.predict(batchds)
    # argmax over the class axis turns per-point scores into hard labels
    pred_labels = np.argmax(pred[0, :, :], axis=1)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pc)
    rgb_codes = np.array([(0.271, 0.372, 0.631), (0.94, 0.94, 0.114),
                          (1, 1, 1), (0.835, 0.345, 0.243), (0, 0, 0)])
    pcd.colors = o3d.utility.Vector3dVector(rgb_codes[pred_labels])
    o3d.io.write_point_cloud("cloud_vis/realworld_pred_cloud.pcd", pcd)
Example #2
def train():

	model = SEM_SEG_Model(config['batch_size'], config['num_classes'], config['bn'])

	train_ds = load_dataset(config['train_ds'], config['batch_size'])
	val_ds = load_dataset(config['val_ds'], config['batch_size'])
	
	callbacks = [
		keras.callbacks.TensorBoard(
			'./logs/{}'.format(config['log_dir']), update_freq=50),
		keras.callbacks.ModelCheckpoint(
			'./logs/{}/model/weights'.format(config['log_dir']), 'val_sparse_categorical_accuracy', save_best_only=True)
	]

	model.build((config['batch_size'], 8192, 3))
	model.summary()

	model.compile(
		optimizer=keras.optimizers.Adam(config['lr']),
		loss=keras.losses.SparseCategoricalCrossentropy(),
		metrics=[keras.metrics.SparseCategoricalAccuracy()]
	)

	model.fit(
		train_ds,
		validation_data=val_ds,
		validation_steps=10,
		validation_freq=1,
		callbacks=callbacks,
		epochs=100,
		verbose=1
	)
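
load_dataset is not part of this snippet; a minimal sketch of what it could look like, assuming each cloud is stored as a flattened (points, labels) pair in a TFRecord file (the feature keys and shapes below are hypothetical):

import tensorflow as tf

def load_dataset(tfrecord_path, batch_size, num_points=8192):
    # Hypothetical schema: flattened xyz coordinates plus one integer label per point.
    feature_spec = {
        'points': tf.io.FixedLenFeature([num_points * 3], tf.float32),
        'labels': tf.io.FixedLenFeature([num_points], tf.int64),
    }

    def parse(example_proto):
        parsed = tf.io.parse_single_example(example_proto, feature_spec)
        points = tf.reshape(parsed['points'], (num_points, 3))
        labels = tf.reshape(parsed['labels'], (num_points, 1))
        return points, labels

    ds = tf.data.TFRecordDataset(tfrecord_path).map(parse).shuffle(1000)
    return ds.batch(batch_size, drop_remainder=True)
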
Example #3
def train(config, params):

    model = SEM_SEG_Model(params['batch_size'], params['num_points'],
                          params['num_classes'], params['bn'])

    model.build(input_shape=(params['batch_size'], params['num_points'], 3))
    model.summary()
    print('[info] model training...')

    optimizer = tf.keras.optimizers.Adam(learning_rate=params['lr'])
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    acc_object = tf.keras.metrics.SparseCategoricalAccuracy()

    train_ds = TFDataset(os.path.join(config['dataset_dir'], 'train.tfrecord'),
                         params['batch_size'], 'scannet')
    val_ds = TFDataset(os.path.join(config['dataset_dir'], 'val.tfrecord'),
                       params['batch_size'], 'scannet')

    train_summary_writer = tf.summary.create_file_writer(
        os.path.join(config['log_dir'], config['log_code']))

    with train_summary_writer.as_default():

        while True:

            train_pts, train_labels = train_ds.get_batch()

            loss, acc = train_step(optimizer, model, loss_object, acc_object,
                                   train_pts, train_labels)

            if optimizer.iterations % config['log_freq'] == 0:
                tf.summary.scalar('train loss',
                                  loss,
                                  step=optimizer.iterations)
                tf.summary.scalar('train accuracy',
                                  acc,
                                  step=optimizer.iterations)

            if optimizer.iterations % config['test_freq'] == 0:

                test_pts, test_labels = val_ds.get_batch()

                test_loss, test_acc = test_step(optimizer, model, loss_object,
                                                acc_object, test_pts,
                                                test_labels)

                tf.summary.scalar('test loss',
                                  test_loss,
                                  step=optimizer.iterations)
                tf.summary.scalar('test accuracy',
                                  test_acc,
                                  step=optimizer.iterations)
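
train_step and test_step live elsewhere in this project; a plausible reconstruction, assuming the standard GradientTape pattern (the signatures are inferred from the call sites above):

import tensorflow as tf

@tf.function
def train_step(optimizer, model, loss_object, acc_object, pts, labels):
    with tf.GradientTape() as tape:
        logits = model(pts, training=True)
        loss = loss_object(labels, logits)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    acc_object.update_state(labels, logits)
    return loss, acc_object.result()

@tf.function
def test_step(optimizer, model, loss_object, acc_object, pts, labels):
    # optimizer is unused here but kept to match the caller's argument list.
    logits = model(pts, training=False)
    loss = loss_object(labels, logits)
    acc_object.update_state(labels, logits)
    return loss, acc_object.result()
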
Example #4
def visualise_predictions(dataset, cloudnr):
    model_nr = 120
    model = SEM_SEG_Model(config['batch_size'], config['num_classes'],
                          config['bn'])
    logdir = './logs/{}/model/weights'.format(config['log_dir'])
    model.build((config['batch_size'], 57600, 3))
    model.compile(optimizer=keras.optimizers.Adam(config['lr']),
                  loss=keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[
                      keras.metrics.SparseCategoricalAccuracy(),
                      UpdatedMeanIoU(num_classes=config['num_classes'])
                  ])
    save_string = logdir + "/saved-model-{}".format(model_nr)
    model.load_weights(save_string)
    test_batched = dataset.batch(config['batch_size'], drop_remainder=True)

    batch = next(iter(test_batched))
    example = np.expand_dims(batch[0][cloudnr, :, :], axis=0)
    prediction = model(example)
    pred_labels = np.argmax(prediction[0, :, :], axis=1)
    true_labels = batch[1][cloudnr, :, :]

    rgb_codes = np.array([(0.271, 0.372, 0.631), (0.94, 0.94, 0.114),
                          (1, 1, 1), (0.835, 0.345, 0.243), (0, 0, 0)])
    pcd = o3d.geometry.PointCloud()
    xyz = np.squeeze(example)
    pcd.points = o3d.utility.Vector3dVector(xyz)
    pcd.colors = o3d.utility.Vector3dVector(rgb_codes[pred_labels])
    pcd_truth = copy.deepcopy(pcd)
    pcd_truth.colors = o3d.utility.Vector3dVector(
        rgb_codes[np.squeeze(true_labels)])

    o3d.io.write_point_cloud(
        "cloud_vis/{}_pred_cloud.pcd".format(config['dataset_name']), pcd)
    o3d.io.write_point_cloud(
        "cloud_vis/{}_truth_cloud.pcd".format(config['dataset_name']),
        pcd_truth)

    #o3d.visualization.draw_geometries([pcd])
    #o3d.visualization.draw_geometries([pcd_truth])

    error_vector = np.where(pred_labels == np.squeeze(true_labels), 4, 0)
    error_pcd = copy.deepcopy(pcd)
    error_pcd.colors = o3d.utility.Vector3dVector(rgb_codes[error_vector])
    o3d.io.write_point_cloud(
        "cloud_vis/{}_error_cloud.pcd".format(config['dataset_name']),
        error_pcd)
    #o3d.visualization.draw_geometries([pcd])
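
The commented-out draw_geometries calls show how the written clouds are meant to be inspected; reading one back for an interactive view works like this (the path mirrors the write above):

import open3d as o3d

pcd = o3d.io.read_point_cloud("cloud_vis/{}_pred_cloud.pcd".format(config['dataset_name']))
o3d.visualization.draw_geometries([pcd])
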
Example #5
def train():

	model = SEM_SEG_Model(config['batch_size'], config['num_classes'], config['bn'])

	#train_ds = load_dataset(config['train_ds'], config['batch_size'])
	#val_ds = load_dataset(config['val_ds'], config['batch_size'])

	train_ds = load_dummy_data(config['train_ds'], config['batch_size'])
	val_ds = load_dummy_data(config['train_ds'], config['batch_size'])

	# thingy = list(val_ds.as_numpy_iterator())
	# thingy[0][0][:,0:100,:].shape
	# model.call(thingy[0][0][:,0:100,:])
	# model.call(thingy[0][0][0:1,0:100,:]) # only a single batch, size can be changed

	callbacks = [
		keras.callbacks.TensorBoard(
			'./logs/{}'.format(config['log_dir']), update_freq=50),
		keras.callbacks.ModelCheckpoint(
			'./logs/{}/model/weights'.format(config['log_dir']), 'val_sparse_categorical_accuracy', save_best_only=True)
	]

	model.build((config['batch_size'], 8192, 3))
	model.summary()

	model.compile(
		optimizer=keras.optimizers.Adam(config['lr']),
		loss=keras.losses.SparseCategoricalCrossentropy(),
		metrics=[keras.metrics.SparseCategoricalAccuracy()]
	)

	model.fit(
		train_ds,
		validation_data=val_ds,
		validation_steps=10,
		validation_freq=1,
		callbacks=callbacks,
		epochs=100,
		verbose=1
	)
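
load_dummy_data is likewise not shown; a minimal sketch that yields random clouds of the shape the model is built for (the signature follows the call above, the internals are an assumption):

import numpy as np
import tensorflow as tf

def load_dummy_data(_path, batch_size, num_points=8192, num_classes=2):
    # Random points and labels; _path is ignored and only mirrors the real loader.
    points = np.random.rand(batch_size * 4, num_points, 3).astype(np.float32)
    labels = np.random.randint(num_classes, size=(batch_size * 4, num_points, 1))
    ds = tf.data.Dataset.from_tensor_slices((points, labels))
    return ds.batch(batch_size, drop_remainder=True)
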
Example #6
def test_net(test_set):
    model_nr = 120
    model = SEM_SEG_Model(config['batch_size'], config['num_classes'],
                          config['bn'])
    logdir = './logs/{}/model/weights'.format(config['log_dir'])
    model.build((config['batch_size'], 57600, 3))
    model.compile(optimizer=keras.optimizers.Adam(config['lr']),
                  loss=keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[
                      keras.metrics.SparseCategoricalAccuracy(),
                      UpdatedMeanIoU(num_classes=config['num_classes'])
                  ])
    save_string = logdir + "/saved-model-{}".format(model_nr)
    model.load_weights(save_string)
    test_batched = test_set.batch(config['batch_size'], drop_remainder=True)
    pred = model.predict(test_batched)

    #results = model.evaluate(test_batched)
    #print("test set metrics: " + results)
    precision_recall_curve(pred, test_batched)
    calc_confusion_matrix(pred, test_batched)
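
precision_recall_curve and calc_confusion_matrix are project helpers that are not shown here; a sketch of the confusion-matrix half, assuming each dataset element is a (points, labels) pair:

import numpy as np
import tensorflow as tf

def calc_confusion_matrix(pred, test_batched):
    pred_labels = np.argmax(pred, axis=-1).flatten()
    true_labels = np.concatenate(
        [labels.numpy().flatten() for _, labels in test_batched])
    print(tf.math.confusion_matrix(true_labels, pred_labels).numpy())
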
Example #7
def train(config, params):

    model = SEM_SEG_Model(params['batch_size'], params['num_points'],
                          params['num_classes'], params['bn'])

    # Input-independent initialization: building the model creates its weights.
    #model.build(input_shape=(params['batch_size'], params['num_points'], 3))
    model.build(input_shape=(params['batch_size'], params['num_points'], 9))
    model.summary()
    print('[info] model training...')

    optimizer = tf.keras.optimizers.Adam(learning_rate=params['lr'])
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()

    train_loss = tf.keras.metrics.Mean()
    test_loss = tf.keras.metrics.Mean()
    train_acc = tf.keras.metrics.SparseCategoricalAccuracy()
    test_acc = tf.keras.metrics.SparseCategoricalAccuracy()

    dataset_train_path = [
        os.path.join(config['dataset_dir'], p)
        for p in os.listdir(config['dataset_dir'])
    ]
    train_ds = TFDataset(dataset_train_path, params['batch_size'],
                         config['dataset_name'])
    val_ds = TFDataset(config['test_tfrecords'], params['batch_size'],
                       config['dataset_name'])
    train_summary_writer = tf.summary.create_file_writer(
        os.path.join(config['log_dir'], config['log_code'], 'train'))

    test_summary_writer = tf.summary.create_file_writer(
        os.path.join(config['log_dir'], config['log_code'], 'test'))
    """
    a = np.ones((16, 512, 1), dtype=float) * 4
    b = np.ones((16, 512, 1), dtype=float)
    c = np.concatenate([a,b],2)
    d = tf.constant(c, dtype=tf.float32)
    """

    template = 'Epoch {}\n Loss: {}, Accuracy: {}\n Test Loss: {}, Test Accuracy: {}'
    if config["weight_load"]:
        model.load_weights(config["checkpointpath_load"])
    #for epoch in range(1000):
    while True:
        train_features, train_labels = train_ds.get_batch()

        loss, train_acc = train_step(optimizer, model, loss_object, train_loss,
                                     train_acc, train_features, train_labels)
        """
        if optimizer.iterations % 10000 == 0:
            optimizer.lr = optimizer.lr - optimizer.lr / 4
            print(optimizer.lr)
        """

        if optimizer.iterations % config['log_freq'] == 0:
            with train_summary_writer.as_default():
                tf.summary.scalar('loss',
                                  train_loss.result(),
                                  step=optimizer.iterations)
                tf.summary.scalar('accuracy',
                                  train_acc.result(),
                                  step=optimizer.iterations)

        if optimizer.iterations % config['test_freq'] == 0:

            test_features, test_labels = val_ds.get_batch()

            test_loss, test_acc = test_step(optimizer, model, loss_object,
                                            test_loss, test_acc, test_features,
                                            test_labels)

            with test_summary_writer.as_default():

                tf.summary.scalar('loss',
                                  test_loss.result(),
                                  step=optimizer.iterations)
                tf.summary.scalar('accuracy',
                                  test_acc.result(),
                                  step=optimizer.iterations)

            print(
                template.format(int(optimizer.iterations), train_loss.result(),
                                train_acc.result() * 100, test_loss.result(),
                                test_acc.result() * 100))
            if optimizer.iterations % 2000 == 0:
                checkpointpath = "checkpoint_cat_%08d" % (optimizer.iterations)
                checkpointpath = os.path.join(config["checkpointdir"],
                                              checkpointpath)
                model.save_weights(checkpointpath)
                if optimizer.iterations % 400000 == 0:
                    break
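
TFDataset here is a project wrapper, not a TensorFlow class; a hypothetical reconstruction of the interface the loop above relies on (get_batch pulling from an endless iterator):

import tensorflow as tf

class TFDataset:
    """Assumed wrapper: endless batches from one or more TFRecord files."""

    def __init__(self, tfrecord_paths, batch_size, dataset_name, num_points=512):
        self.dataset_name = dataset_name
        self._num_points = num_points
        # Schema is an assumption; nine floats per point (xyz, rgb, normalized xyz).
        self._spec = {
            'points': tf.io.FixedLenFeature([num_points * 9], tf.float32),
            'labels': tf.io.FixedLenFeature([num_points], tf.int64),
        }
        ds = tf.data.TFRecordDataset(tfrecord_paths).map(self._parse)
        ds = ds.repeat().shuffle(1000).batch(batch_size, drop_remainder=True)
        self._iterator = iter(ds)

    def _parse(self, example_proto):
        parsed = tf.io.parse_single_example(example_proto, self._spec)
        points = tf.reshape(parsed['points'], (self._num_points, 9))
        labels = tf.reshape(parsed['labels'], (self._num_points, 1))
        return points, labels

    def get_batch(self):
        return next(self._iterator)
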
Example #8
def train(config, params):

    model = SEM_SEG_Model(params['batch_size'], params['num_points'],
                          params['num_classes'], params['bn'])
    model.build(input_shape=(params['batch_size'], params['num_points'], 9))
    model.summary()
    print('[info] model training...')

    optimizer = tf.keras.optimizers.Adam(learning_rate=params['lr'])
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()

    train_loss = tf.keras.metrics.Mean()
    test_loss = tf.keras.metrics.Mean()
    train_acc = tf.keras.metrics.SparseCategoricalAccuracy()
    test_acc = tf.keras.metrics.SparseCategoricalAccuracy()

    dataset_path = [
        os.path.join(config["test_tfrecords_dir"], p)
        for p in os.listdir(config["test_tfrecords_dir"])
    ]
    #train_ds = TFDataset(config['test_tfrecords'], params['batch_size'], config['dataset_name'])
    train_ds = TFDataset(dataset_path, params['batch_size'],
                         config['dataset_name'])

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    model.load_weights(config["checkpointpath_load"])

    dataset_num = train_ds.dataset_num

    steps = int(dataset_num / params["batch_size"]) + 1
    print(steps)
    dic = {}
    for epoch in range(steps):
        train_features, train_labels, centroid, max_dis = train_ds.get_batch_test()

        pred, loss, train_acc = train_step(optimizer, model, loss_object,
                                           train_loss, train_acc,
                                           train_features, train_labels)
        pred_np = pred.numpy()
        train_features_np = train_features.numpy()  # (batch, 512, 9)
        centroid_np = centroid.numpy()
        max_dis_np = max_dis.numpy()
        size0 = train_features_np.shape[0]
        size1 = train_features_np.shape[1]
        if (size0 < 1) or (size1 < 1):
            print("error", epoch)
            continue

        for b in range(size0):
            for i in range(size1):
                features_one = train_features_np[b][i]
                p = tuple(
                    np.round(features_one[:3] * max_dis_np[b] + centroid_np[b],
                             1))
                if p not in dic:
                    dic[p] = [
                        features_one[3:6], features_one[6:], pred_np[b][i]
                    ]
                else:
                    outlier_pred = min(dic[p][2][0], pred_np[b][i][0])
                    inlier_pred = max(dic[p][2][1], pred_np[b][i][1])
                    dic[p][2] = np.array([outlier_pred, inlier_pred])
        if epoch % 1000 == 0:
            print(epoch)

    print("recon")
    pcd_fromnp = dic2pcd(dic)
    o3d.io.write_point_cloud(
        "cat_test_result_batch_repeat_repeat30k_round1.ply", pcd_fromnp)
Example #9
def train(train_ds, val_ds, epochs, num_classes, resume_training):
    model = SEM_SEG_Model(config['batch_size'], config['num_classes'],
                          config['bn'])
    logdir = './logs/{}/model/weights'.format(config['log_dir'])
    cppath = logdir + "/saved-model-{epoch:02d}"

    callbacks = [
        keras.callbacks.TensorBoard('./logs/{}'.format(config['log_dir']),
                                    update_freq=25),
        keras.callbacks.ModelCheckpoint(cppath, save_best_only=False),
    ]

    model.build((config['batch_size'], 57600, 3))
    model.summary()

    model.compile(optimizer=keras.optimizers.Adam(config['lr']),
                  loss=keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[
                      keras.metrics.SparseCategoricalAccuracy(),
                      UpdatedMeanIoU(num_classes=num_classes)
                  ])

    if resume_training:
        model.load_weights(logdir)

    model.fit(train_ds,
              validation_data=val_ds,
              validation_steps=10,
              validation_freq=1,
              callbacks=callbacks,
              initial_epoch=config['starting_epoch'],
              epochs=epochs,
              verbose=1)
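
A call site for this train function might look like the following (load_dataset as in the earlier examples; the epoch count is a placeholder):

train_ds = load_dataset(config['train_ds'], config['batch_size'])
val_ds = load_dataset(config['val_ds'], config['batch_size'])
train(train_ds, val_ds, epochs=150,
      num_classes=config['num_classes'], resume_training=False)
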
Example #10
# Assumed imports (the original snippet omits them); SEM_SEG_Model comes from the project.
import numpy as np
import tensorflow as tf
from tensorflow import keras

# some dummy data
data = np.random.rand(1080, 1080, 3)
labels = np.random.randint(5, size=(1080, 1080, 1))
# unroll the point clouds into one dimension
data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
labels = labels.reshape(labels.shape[0] * labels.shape[1], labels.shape[2])
# stack multiple examples into a whole set
inp = np.stack((data, data, data))
outp = np.stack((labels, labels, labels))
weirdin = (tf.convert_to_tensor(inp), tf.convert_to_tensor(outp))
dataset = tf.data.Dataset.from_tensor_slices(weirdin)
batches = dataset.batch(3, drop_remainder=True)

model = SEM_SEG_Model(2, 5)
callbacks = [
    keras.callbacks.TensorBoard('./logs/{}'.format('scannet_1'),
                                update_freq=50),
    keras.callbacks.ModelCheckpoint(
        './logs/{}/model/weights'.format('scannet_1'),
        'val_sparse_categorical_accuracy',
        save_best_only=True)
]

model.build((1, 1166400, 3))
model.summary()

model.compile(optimizer=keras.optimizers.Adam(0.001),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=[keras.metrics.SparseCategoricalAccuracy()])
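
The snippet stops after compile; training on the dummy batches would then be a single fit call (the epoch count is arbitrary):

model.fit(batches, epochs=3, verbose=1)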