Example 1
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    log_string('----')
    current_data, current_label, _ = provider.shuffle_data(train_data[:,0:NUM_POINT,:], train_label) 
    
    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE
    
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    
    for batch_idx in range(num_batches):
        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d'%(batch_idx,num_batches))
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        
        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training,}
        summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                                         feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val
    
    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
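Every example on this page hinges on provider.shuffle_data, which shuffles the point clouds and their labels with one shared permutation and also returns that permutation (Example 31 below reorders an auxiliary array with it). A minimal NumPy sketch of such a helper is shown here for illustration only; it is not taken from the actual provider module:

import numpy as np

def shuffle_data(data, labels):
    """ Illustrative sketch, not the actual provider module.
        Shuffle point clouds and labels with one shared permutation.
        Input:  data   -- B x N x C array of point clouds
                labels -- B array of labels
        Return: shuffled data, shuffled labels, shuffle indices """
    idx = np.arange(len(labels))
    np.random.shuffle(idx)
    return data[idx, ...], labels[idx], idx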
Example 2
def make_ModelNet_data_A_B(err_num, batch_size):
    ModelNet_TRAIN_FILES = provider.getDataFiles(
        '../data/modelnet40_ply_hdf5_2048/train_files.txt')

    class_data = {l: [] for l in range(40)}
    for data_set in ModelNet_TRAIN_FILES:
        file = h5py.File('..//' + data_set, 'r')
        data = file['data'][...]
        label = np.reshape(file['label'][...], -1)
        normal = file['normal'][...]
        for i in range(data.shape[0]):
            class_data[label[i]].append(data[i])
        file.close()

    for i in range(40):
        class_data[i] = np.array(class_data[i])

    j = 0
    for data_set in ModelNet_TRAIN_FILES:
        print('making ModelNet_A_B_' + str(j) + '.h5')
        file = h5py.File('..//' + data_set, 'r')
        data_A = file['data'][...]
        label_A = file['label'][...]

        data_A, label_A, _ = provider.shuffle_data(data_A, label_A)
        ERR = err_num * int(data_A.shape[0] / 9843)
        data_B, label_B = provider.get_data_with_err_ModelNet(
            label_A, class_data, err_num)
        label_B = np.reshape(label_B, [-1, 1])
        data_A_B = np.zeros([2 * data_A.shape[0], 2048, 3])
        label_A_B = np.zeros([2 * data_A.shape[0], 1])

        for i in range(int(data_A.shape[0] / (batch_size / 2))):
            data_A_B[int((i + 0) *
                         batch_size):int((i + 0.5) * batch_size)] = data_A[int(
                             (i + 0) * (batch_size / 2)):int((i + 1) *
                                                             (batch_size / 2))]
            data_A_B[int((i + 0.5) *
                         batch_size):int((i + 1) * batch_size)] = data_B[int(
                             (i + 0) * (batch_size / 2)):int((i + 1) *
                                                             (batch_size / 2))]
            label_A_B[int((i + 0) * batch_size):int(
                (i + 0.5) * batch_size)] = label_A[int(
                    (i + 0) * (batch_size / 2)):int((i + 1) *
                                                    (batch_size / 2))]
            label_A_B[int((i + 0.5) *
                          batch_size):int((i + 1) * batch_size)] = label_B[int(
                              (i + 0) *
                              (batch_size / 2)):int((i + 1) *
                                                    (batch_size / 2))]

        data_set_A_B = h5py.File('data/ModelNet_A_B_' + str(j) + '.h5', 'w')
        data_set_A_B['data'] = data_A_B
        data_set_A_B['label'] = label_A_B
        data_set_A_B.close()
        file.close()
        j += 1
Example 3
def getTestData(test_file_idxs):
	# for fn in range(len(TEST_FILES)):
	test_data, test_label = provider.loadDataFile(TEST_FILES[test_file_idxs[0]])
	test_data = test_data[:, 0:NUM_POINT, :]
	test_data, test_label, _ = provider.shuffle_data(test_data, np.squeeze(test_label))
	test_data = test_data[:, :, :, np.newaxis]
	test_label = np.squeeze(test_label)
	test_label = keras.utils.to_categorical(test_label, num_classes=40)
	return test_data,test_label
Example 4
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    log_string(str(datetime.now()))
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.random_scale_point_cloud(rotated_data)
            jittered_data = provider.rotate_perturbation_point_cloud(
                jittered_data)
            jittered_data = provider.shift_point_cloud(jittered_data)
            jittered_data = provider.jitter_point_cloud(jittered_data)
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 5
def train_one_epoch(sess, ops, gmm, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]], compensate = False)
        # points_idx = range(0,NUM_POINT)
        points_idx = np.random.choice(range(0,2048),NUM_POINT)
        current_data = current_data[:, points_idx, :]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering

            augmented_data = current_data[start_idx:end_idx, :, :]
            if augment_scale:
                augmented_data = provider.scale_point_cloud(augmented_data, smin=0.66, smax=1.5)
            if augment_rotation:
                augmented_data = provider.rotate_point_cloud(augmented_data)
            if augment_translation:
                augmented_data = provider.translate_point_cloud(augmented_data, tval = 0.2)
            if augment_jitter:
                augmented_data = provider.jitter_point_cloud(augmented_data, sigma=0.01,
                                                        clip=0.05)  # default sigma=0.01, clip=0.05
            if augment_outlier:
                augmented_data = provider.insert_outliers_to_point_cloud(augmented_data, outlier_ratio=0.02)



            feed_dict = {ops['points_pl']: augmented_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['w_pl']: gmm.weights_,
                         ops['mu_pl']: gmm.means_,
                         ops['sigma_pl']: np.sqrt(gmm.covariances_),
                         ops['is_training_pl']: is_training, }
            summary, step, _, loss_val, reconstructed_points_val = sess.run([ops['merged'], ops['step'],
                                                             ops['train_op'], ops['loss'], ops['reconstructed_points']],
                                                            feed_dict=feed_dict)
            train_writer.add_summary(summary, step)

            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
Example 6
def make_ScanObjectNN_data_batch_A_B(err_num, batch_size):
    class_data = {l: [] for l in range(15)}
    file = h5py.File(
        '../data/h5_files/main_split/training_objectdataset_augmentedrot_scale75.h5',
        'r')
    data_A = file['data'][...]
    label = np.reshape(file['label'][...], -1)
    for i in range(data_A.shape[0]):
        class_data[label[i]].append(data_A[i])

    for i in range(15):
        class_data[i] = np.array(class_data[i])

    print('making ScanObject_A_B.h5')

    label_A = np.reshape(label, [-1, 1])
    data_A, label_A, _ = provider.shuffle_data(data_A, label_A)
    ERR = int(err_num / batch_size)

    data_B = np.zeros(
        [int((data_A.shape[0]) / batch_size) * batch_size, 2048, 3])
    label_B = np.zeros([int((data_A.shape[0]) / batch_size) * batch_size])
    for i in range(int(label_A.shape[0] / (batch_size / 2))):
        data_B[int(i*(batch_size/2)):int((i+1)*(batch_size/2))], \
        label_B[int(i*(batch_size/2)):int((i+1)*(batch_size/2))] = \
        provider.get_data_with_err_ScanObjectNN(label_A[int(i*(batch_size/2)):int((i+1)*(batch_size/2))],class_data,ERR)

    label_B = np.reshape(label_B, [-1, 1])
    data_A_B = np.zeros(
        [int((2 * data_A.shape[0]) / batch_size) * batch_size, 2048, 3])
    label_A_B = np.zeros(
        [int((2 * data_A.shape[0]) / batch_size) * batch_size, 1])

    for i in range(int(data_A.shape[0] / (batch_size / 2))):
        data_A_B[int((i + 0) *
                     batch_size):int((i + 0.5) * batch_size)] = data_A[int(
                         (i + 0) * (batch_size / 2)):int((i + 1) *
                                                         (batch_size / 2))]
        data_A_B[int((i + 0.5) *
                     batch_size):int((i + 1) * batch_size)] = data_B[int(
                         (i + 0) * (batch_size / 2)):int((i + 1) *
                                                         (batch_size / 2))]
        label_A_B[int((i + 0) *
                      batch_size):int((i + 0.5) * batch_size)] = label_A[int(
                          (i + 0) * (batch_size / 2)):int((i + 1) *
                                                          (batch_size / 2))]
        label_A_B[int((i + 0.5) *
                      batch_size):int((i + 1) * batch_size)] = label_B[int(
                          (i + 0) * (batch_size / 2)):int((i + 1) *
                                                          (batch_size / 2))]

    data_set_A_B = h5py.File('data/ScanObjectNN_A_B.h5', 'w')
    data_set_A_B['data'] = data_A_B
    data_set_A_B['label'] = label_A_B
    data_set_A_B.close()
    file.close()
Example 7
def train_one_epoch(config, sess, ops, epoch):
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:config.num_points, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)
        file_size = current_data.shape[0]
        num_batches = file_size // config.batch_size

        total_correct = 0
        total_seen = 0
        losses = []

        for batch_idx in range(num_batches):
            start_idx = batch_idx * config.batch_size
            end_idx = (batch_idx + 1) * config.batch_size

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)

            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            step, _, loss_val, pred_val = sess.run(
                [ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                feed_dict=feed_dict)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += config.batch_size
            losses.append(loss_val)
            if batch_idx % max(config.train_log_frq / config.batch_size,
                               1) == 0:
                acc = total_correct / float(total_seen)
                loss = np.mean(losses)
                losses = []
                log(
                    config.log_file,
                    'TRAINING EPOCH {} - accuracy: {}    loss: {}'.format(
                        epoch, acc, loss))
                LOSS_LOGGER.log(loss, epoch, "train_loss")
                ACC_LOGGER.log(acc, epoch, "train_accuracy")
Example 8
def trainDataPreHandle(train_file_idxs):
	current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs])
	current_data = current_data[:, 0:NUM_POINT, :]  #chose data
	current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
	current_data = provider.rotate_point_cloud(current_data)
	current_data = provider.jitter_point_cloud(current_data)
	current_data = current_data[:, :, :, np.newaxis]
	current_label = np.squeeze(current_label)	#label
	current_label = keras.utils.to_categorical(current_label, num_classes=40) #40 classes
	print("load data", current_data.shape, current_label.shape)
	return current_data,current_label
Example 9
    def train_one_epoch():
        # Shuffle training files to vary the order of training files(hdf5) at each epoch
        train_file_idxs = np.arange(0, len(TRAIN_FILES))
        np.random.shuffle(train_file_idxs)

        for fn in range(len(TRAIN_FILES)):
            log_string('----' + str(fn) + '-----', log_file)
            current_data, current_label = provider.loadDataFile(
                TRAIN_FILES[train_file_idxs[fn]])
            current_data = current_data[:, 0:NUM_POINT, :]
            current_data, current_label, _ = provider.shuffle_data(
                current_data, np.squeeze(current_label))
            current_label = np.squeeze(current_label)
            #sess.run(iterator.initializer, feed_dict={data_pl: current_data, label_pl: current_label})

            total_num_samples = current_data.shape[0]
            num_batches = total_num_samples // BATCH_SIZE
            total_correct = 0
            total_seen = 0
            all_total_loss = 0
            all_classify_loss = 0

            for batch_idx in range(num_batches):
                total_seen += BATCH_SIZE
                start_idx = batch_idx * BATCH_SIZE
                end_idx = (batch_idx + 1) * BATCH_SIZE
                rotated_data = provider.rotate_point_cloud(
                    current_data[start_idx:end_idx])
                jittered_data = provider.jitter_point_cloud(rotated_data)
                current_batch_data = jittered_data
                current_batch_label = current_label[start_idx:end_idx]
                # display_point(current_batch_data[0], 127 * np.ones_like(current_batch_data[0]))
                _, step, total_loss_train, classify_loss_train, mat_diff_loss_train, summary_train, logits_train = \
                    sess.run([optim_op, batch, total_loss_ts, classify_loss_ts, mat_diff_loss_ts, merged_summary, logits_ts],
                             feed_dict={pts_pl: current_batch_data,
                                        labels_pl: current_batch_label,
                                        is_training_pl: True,
                                        keepprob_pl: 0.7})
                train_writer.add_summary(summary_train, step)
                all_total_loss += total_loss_train
                all_classify_loss += classify_loss_train
                total_correct += np.sum(
                    np.argmax(logits_train, 1) == current_batch_label)

            log_string(
                'mean total loss: {:.4f}'.format(all_total_loss /
                                                 float(num_batches)), log_file)
            log_string(
                'mean classify loss: {:.4f}'.format(
                    all_classify_loss / float(num_batches)), log_file)
            log_string(
                'accuracy: {:.4f}'.format(total_correct / float(total_seen)),
                log_file)
Example 10
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    loss_sum = 0
    total_seen = 0
    for fn in range(len(TRAIN_FILES)):
        #for fn in range(1): #use only first file for less data
        #log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES[train_file_idxs[fn]])
        #current_data, current_label = provider.loadDataFile(TRAIN_FILES[0])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        current_data_orig = np.copy(current_data)
        #sort the goal pointcloud
        for i in range(len(current_data_orig)):
            current_data_orig[i] = current_data_orig[i][np.lexsort(
                np.fliplr(current_data_orig[i]).T)]

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])
            #jittered_data = provider.jitter_point_cloud(current_data[start_idx:end_idx, :, :])
            #jittered_data = current_data[start_idx:end_idx, :, :]
            jittered_data = rotated_data
            feed_dict = {
                ops['pointclouds_pl']: jittered_data,
                ops['goal_pcs']: current_data_orig[start_idx:end_idx, :, :],
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val, encoding = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred'], ops['enc']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
Example 11
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)

    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(
            os.path.join('../data/modelnet40_ply_hdf5_2048/',
                         TRAIN_FILES[train_file_idxs[fn]]))
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # rotation
            rotated_data = provider.rotate_point_cloud(
                current_data[start_idx:end_idx, :, :])  # z rotation

            feed_dict = {
                ops['pointclouds_pl']: rotated_data,
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['prediction_op']
                ],
                feed_dict=feed_dict)
            train_writer.add_summary(summary, step)

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 12
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train samples
    train_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_idxs)

    total_correct = 0
    total_seen = 0
    loss_sum = 0
    y_val=[]
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_file = os.path.join(H5_DIR,TRAIN_FILES[train_idxs[fn]])
        current_data, current_label, current_global = provider.load_h5(current_file,'class',glob=True)
        current_data, current_label,current_global, _ = provider.shuffle_data(current_data, np.squeeze(current_label),global_pl=current_global)
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
                                                                
        log_string(str(datetime.now()))
        
          
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            batch_data, batch_label,batch_global = get_batch(current_data, current_label,current_global, start_idx, end_idx)
          
            feed_dict = {ops['pointclouds_pl']: batch_data,
                         ops['labels_pl']: batch_label,
                         ops['is_training_pl']: is_training,                         
                         ops['global_pl']:batch_global,
                        
            }
            summary, step, _, loss_val, pred_val, coefs, coefs2 = sess.run([ops['merged'], ops['step'],
                                                                            ops['train_op'], ops['loss'],
                                                                            ops['pred'],
                                                                            ops['coefs'],ops['coefs2']],
                                                                   feed_dict=feed_dict)

            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            
            correct = np.sum(pred_val == batch_label)
            total_correct += correct
            total_seen += (BATCH_SIZE)
            loss_sum += np.mean(loss_val)

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 13
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    sem_seg_util.log_string(LOG_FOUT, '----')
    current_data, current_label, _ = provider.shuffle_data(
        train_data[:, 0:NUM_POINTS, :], train_label)

    file_size = current_data.shape[0]
    num_batches = file_size // (NUM_GPU * BATCH_SIZE)

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):

        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d' %
                  (batch_idx, num_batches))

        start_idx = []
        end_idx = []

        for gpu_idx in range(NUM_GPU):
            start_idx.append((batch_idx + gpu_idx) * BATCH_SIZE)
            end_idx.append((batch_idx + gpu_idx + 1) * BATCH_SIZE)

        feed_dict = dict()
        for gpu_idx in range(NUM_GPU):
            feed_dict[ops['inputs_phs'][gpu_idx]] = current_data[
                start_idx[gpu_idx]:end_idx[gpu_idx], :, :]
            feed_dict[ops['labels_phs'][gpu_idx]] = current_label[
                start_idx[gpu_idx]:end_idx[gpu_idx]]
            feed_dict[ops['is_training_phs'][gpu_idx]] = is_training

        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)

        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx[-1]:end_idx[-1]])
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINTS)
        loss_sum += loss_val

    sem_seg_util.log_string(LOG_FOUT,
                            'mean loss: %f' % (loss_sum / float(num_batches)))
    sem_seg_util.log_string(
        LOG_FOUT, 'accuracy: %f' % (total_correct / float(total_seen)))
Example 14
def data_aug(method=0):
    ''' Input argument
        method: 0 -------- add noise
                1 -------- rotate the objects
    '''
    object_data = np.zeros((1, 2048, 3))
    object_labels = np.zeros((1), np.int32)
    for fn in range(len(TRAIN_FILES)):
        print('loading the file' + str(fn))
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[fn])
        for label in range(current_label.shape[0]):
            if current_label[label] == 10 or current_label[
                    label] == 38 or current_label[label] == 32:
                object_data = np.vstack(
                    (object_data,
                     current_data[label, :, :].reshape(1, 2048, 3)))
                object_labels = np.vstack(
                    (object_labels, current_label[label]))

    object_data = np.delete(object_data, 0, axis=0)
    object_labels = np.delete(object_labels, 0, axis=0)
    # data after adding noise
    if method == 0:
        object_data = provider.jitter_point_cloud(object_data,
                                                  sigma=0.001,
                                                  clip=0.005)
    elif method == 1:
        need_to_rotate_labels = object_labels
        # rotate through 6 angles
        for i in range(6):
            print(i)
            rotation_angle = i * (np.pi / 3.)
            rotate_data = provider.rotate_point_cloud_by_angle(
                object_data, rotation_angle)
            object_data = np.vstack((object_data, rotate_data))
            object_labels = np.vstack((object_labels, need_to_rotate_labels))

    for i in range(len(TRAIN_FILES)):
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[i])
        object_data = np.vstack((object_data, current_data))
        object_labels = np.vstack((object_labels, current_label))
        object_data, object_labels, _ = provider.shuffle_data(
            object_data, object_labels)
        n_object = object_data.shape[0]
        num_each_file = n_object // 6
        for i in range(6):
            f = h5py.File(data_dir + '/object_aug_rotate' + str(i) + '.h5',
                          'w')
            f['data'] = object_data[(i * num_each_file):(i + 1) *
                                    num_each_file, :, :]
            f['label'] = object_labels[i * (num_each_file):(i + 1) *
                                       num_each_file]
            f.close()
Example 15
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string('----')
    current_data, current_label, _ = provider.shuffle_data(
        train_data[:, 0:NUM_POINT, :], train_label)

    file_size = current_data.shape[0]
    num_batches = file_size // (FLAGS.num_gpu * BATCH_SIZE)  # number of batches needed

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):  # iterate over every batch
        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d' %
                  (batch_idx, num_batches))
        start_idx_0 = batch_idx * BATCH_SIZE
        end_idx_0 = (batch_idx + 1) * BATCH_SIZE
        start_idx_1 = (batch_idx + 1) * BATCH_SIZE
        end_idx_1 = (batch_idx + 2) * BATCH_SIZE

        feed_dict = {
            ops['pointclouds_phs'][0]:
            current_data[start_idx_0:end_idx_0, :, :],
            ops['pointclouds_phs'][1]:
            current_data[start_idx_1:end_idx_1, :, :],
            ops['labels_phs'][0]: current_label[start_idx_0:end_idx_0],
            ops['labels_phs'][1]: current_label[start_idx_1:end_idx_1],
            ops['is_training_phs'][0]: is_training,
            ops['is_training_phs'][1]: is_training
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)  # predictions for the current batch
        correct = np.sum(
            pred_val ==
            current_label[start_idx_1:end_idx_1])  # correct predictions in the current batch
        total_correct += correct  # running correct count for this epoch
        total_seen += (BATCH_SIZE * NUM_POINT)  # total points seen in this epoch
        loss_sum += loss_val  # accumulated loss for this epoch

    log_string('mean loss: %f' %
               (loss_sum / float(num_batches)))  # mean per-batch loss for this epoch
    log_string('accuracy: %f' %
               (total_correct / float(total_seen)))  # overall point accuracy for this epoch
Example 16
def provide_data(sess2):
    BATCH_SIZE = FLAGS.batch_size
    current_data, current_label = provider.loadDataFile('./data/h5/traincompleteall.h5')
    current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
    current_label = np.squeeze(current_label.astype(int))

    partial_data, partial_label = provider.loadDataFile('./data/h5/trainall.h5')
    partial_data, partial_label, _ = provider.shuffle_data(partial_data, np.squeeze(partial_label))
    partial_label = np.squeeze(partial_label.astype(int))


    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    pointclouds_pl = sess2.graph.get_tensor_by_name('Placeholder:0')
    labels_pl = sess2.graph.get_tensor_by_name('Placeholder_1:0')
    is_train_pl = sess2.graph.get_tensor_by_name('Placeholder_2:0')

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # manipulate the data
        rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        # manipulate the labels
        one_hot_labe1 = np.zeros((BATCH_SIZE, 41))
        one_hot_labe1[np.arange(BATCH_SIZE), current_label[start_idx:end_idx]] = 1
        one_hot_labe2 = np.zeros((BATCH_SIZE, 41))
        one_hot_labe2[np.arange(BATCH_SIZE), partial_label[start_idx:end_idx]] = 1

        is_train = False
        feed_dict = {pointclouds_pl: provider.rotate_point_cloud(partial_data[start_idx:end_idx, :, :]),
                     labels_pl: partial_label[start_idx:end_idx],
                     is_train_pl: is_train}
        G_features = sess2.run(sess2.graph.get_tensor_by_name('maxpool/maxpool:0'), feed_dict=feed_dict)
        #out['data'] = jittered_data
        #out['labe'] = one_hot_labe
        yield jittered_data, one_hot_labe1, current_label[start_idx:end_idx], np.squeeze(G_features), one_hot_labe2, partial_label[start_idx:end_idx]
Example 17
def train_one_epoch(sess, ops, train_writer, epoch):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string('----')
    current_data, current_label, _ = provider.shuffle_data(
        train_data[:, 0:NUM_POINT, :], train_label[:, 0:NUM_POINT])

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE
    num_batches = min(200, num_batches)

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    print('total batch num = ', num_batches)

    def log_train():
        log_string('epoch %d batch %d    train mean loss: %f' %
                   (epoch, batch_idx, loss_sum / float(num_batches)))
        log_string('epoch %d batch %d    train accuracy: %f' %
                   (epoch, batch_idx, total_correct / float(total_seen)))

    for batch_idx in range(num_batches):
        #if batch_idx % 100 == 0:
        #print('Current batch/total batch num: %d/%d'%(batch_idx,num_batches))
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        feed_dict = {
            ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val

        if (epoch == 0 and batch_idx <= 100) or batch_idx % 100 == 0:
            log_train()
    log_string('\n')
    log_train()
Example 18
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    train_files_number = len(TRAIN_FILES)
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    if PROFILE_DEBUG: train_files_number = 1
    for fn in range(train_files_number):
        if PROFILE_DEBUG: fn = 4
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:, 0:NUM_POINT, :]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0
        if PROFILE_DEBUG: num_batches = 1
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training}
            summary, step,_, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                                                          ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += (loss_val * BATCH_SIZE)
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i - start_idx] == l)

        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
        log_string('accuracy total correct %f' % total_correct)
        log_string('accuracy total seen: %f' % (float(total_seen)))
Example 19
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx,:,:,])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            if CROPOUT_TYPE == 'bounding_sphere':
                cropped_data = provider.cropout_point_cloud(jittered_data, FLAGS.max_trans_dist, random_trans_dist=True, close=FLAGS.close)
            elif CROPOUT_TYPE == 'bubble':
                cropped_data = provider.bubble_cropout(jittered_data, FLAGS.max_trans_dist, random_bubble_radius=False, close=FLAGS.close)
            else:
                print("cropeout_type does not exist")
                return
            feed_dict = {ops['pointclouds_pl']: cropped_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 20
def data_saver(data_x, data_y, train_dir, test_dir):
    """ Do the shuffle and split and save data and labels into dictionaries
    """
    shuffled_x, shuffled_y, _ = shuffle_data(data_x, data_y)
    train_x, train_y, test_x, test_y = split_data(shuffled_x, shuffled_y, split_ratio=0.8)
    # save as dictionary
    train_dict = {'data': train_x, 'label': train_y}
    test_dict = {'data': test_x, 'label': test_y}
    np.save(train_dir, train_dict)
    np.save(test_dir, test_dict)

    print(f"\nData saved to {train_dir}, {test_dir}\n")
    print("\ntrain data shape:", train_x.shape)
    print("\ntest data shape:", test_x.shape)
Example 21
def train_one_sub_epoch(sess, ops, train_writer, train_data, train_label):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string('----')
    current_data, current_label, _ = provider.shuffle_data(
        train_data[:, 0:NUM_POINT, :], train_label)

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        feed_dict = {
            ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        loss_path = '/home/jp/project/test/powernet/sem_seg/loss'
        if not os.path.exists(loss_path):
            os.makedirs(loss_path)

        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))

    if BATCH_SIZE * num_batches < file_size:
        start_idx = num_batches * BATCH_SIZE
        return current_data[start_idx:, :, :], current_label[start_idx:]
    else:
        return np.expand_dims(current_data[-1, :, :],
                              axis=0), np.expand_dims(current_label[-1],
                                                      axis=0)
Example 22
        def train_one_epoch(train_file_idx, epoch_num):
            is_training = True

            total_correct = 0
            total_seen = 0
            loss_sum = 0
            num_batch = 0
             
            total_batch = 0

            for i in range(num_train_file):
                cur_train_filename = os.path.join(train_file_list[train_file_idx[i]])
                printout(flog, 'Loading train file ' + cur_train_filename)

                #================load train data=======================
                #load train data
                cur_data_all, cur_seg = load_h5(cur_train_filename)
                cur_data_all, cur_seg, order = provider.shuffle_data(cur_data_all, np.squeeze(cur_seg))

                '''#sample data
                a =  np.arange(cur_data.shape[0])[:,None]

                sample_top1 = np.argpartition(curv, -sample_num1, axis=1)[:, -sample_num1:]
                cur_data_sample1 = cur_data[a, sample_top1,:]'''
                
                num_data = len(cur_seg)
                num_batch = num_data // batch_size
                total_batch += num_batch

                for batch_idx in range(num_batch):
                    if batch_idx % 100 == 0:
                        print('Current batch/total batch num: %d/%d'%(batch_idx,num_batch))

                    start_idx = batch_idx * batch_size
                    end_idx = (batch_idx+1) * batch_size

                    feed_dict = {ops['pointclouds_pl']: cur_data_all[start_idx:end_idx, :, :],
                                 ops['labels_pl']: cur_seg[start_idx: end_idx, ...],
                                 ops['is_training_pl']: is_training,}
                    summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)

                    train_writer.add_summary(summary, step)
                    pred_val = np.argmax(pred_val, 2)
                    correct = np.sum(pred_val == cur_seg[start_idx:end_idx,:])
                    total_correct += correct
                    total_seen += (batch_size*point_num)
                    loss_sum += loss_val
            
            printout(flog, '\t\tTraining Mean_loss: %f' % (loss_sum / float(total_batch)))
            printout(flog, '\t\tTraining Seg Accuracy: %f' % (total_correct / float(total_seen)))
Example 23
def train_classifier_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    for fn in range(len(TRAIN_FILES_CLS)):
        # Shuffle train files
        current_data, current_label = provider.loadDataFile(
            TRAIN_FILES_CLS[fn])
        current_data, current_label, _ = provider.shuffle_data(
            current_data, np.squeeze(current_label))
        current_label = np.squeeze(current_label)
        # I find that we can increase the accuracy by about 0.2% after
        # padding zero vectors, but I do not know the reason.
        current_data = np.concatenate([
            current_data,
            np.zeros((current_data.shape[0],
                      NUM_FEATURE_CLS - current_data.shape[1]))
        ],
                                      axis=-1)
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE

        total_correct = 0
        total_seen = 0
        loss_sum = 0

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx + 1) * BATCH_SIZE

            # Input the features and labels to the graph.
            feed_dict = {
                ops['pointclouds_pl']: current_data[start_idx:end_idx, ...],
                ops['labels_pl']: current_label[start_idx:end_idx],
                ops['is_training_pl']: is_training,
            }
            # Calculate the loss and classification scores.
            summary, step, _, loss_val, pred_val = sess.run(
                [
                    ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                    ops['pred']
                ],
                feed_dict=feed_dict)

            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
Example 24
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string('----')
    current_data, current_label, _ = provider.shuffle_data(
        train_data[:, 0:NUM_POINT, :], train_label)

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        if batch_idx % 10 == 0:
            print('Current batch/total batch num: %d/%d' %
                  (batch_idx, num_batches))
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        feed_dict = {
            ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        loss_path = '/home/jp/project/test/powernet/sem_seg/loss'
        loss_name = loss_path + '/' + str(batch_idx) + '_loss.npy'
        pred_name = loss_path + '/' + str(batch_idx) + 'pred.npy'
        label_name = loss_path + '/' + str(batch_idx) + 'label.npy'
        np.save(loss_name, loss_val)
        np.save(pred_name, pred_val)
        np.save(label_name, current_label[start_idx:end_idx])
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val

    print(loss_sum)
    print(num_batches)
    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 25
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    # randomly shuffle the order of the training files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0

        # Within one epoch, train mini-batch by mini-batch until the whole training set has been
        # traversed once, accumulating total_correct, total_seen and loss_sum.
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            # call rotate_point_cloud from provider
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            # Train: run the graph in the tf session; ops['pred'] is the network output and feed_dict supplies its input data
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val

        # Log the mean loss and mean accuracy.
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 26
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string('----')
    current_data, current_label, _ = provider.shuffle_data(
        train_data[:, 0:NUM_POINT, :], train_label)

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d' %
                  (batch_idx, num_batches))
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # num_scale x m x n x 3
        current_point_3d_voxel_id = provider.voxle_3d_id_for_batch_data(
            current_data[start_idx:end_idx, :, :], max_num, num_scale)

        feed_dict = {
            ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['point_3d_voxel_id']: current_point_3d_voxel_id,
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE * NUM_POINT)
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 27
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    loss_sum = 0

    current_data_pl, current_label = provider.load_h5(TRAIN_FILE, 'class')
    #,nevts=5e5
    if multi:
        current_label = np.argmax(current_label, axis=-1)
    current_data_pl, current_label, _ = provider.shuffle_data(
        current_data_pl, np.squeeze(current_label))

    file_size = current_data_pl.shape[0]
    num_batches = file_size // BATCH_SIZE
    #num_batches = 4
    log_string(str(datetime.now()))
    for batch_idx in range(num_batches):

        start_idx = batch_idx * (BATCH_SIZE)
        end_idx = (batch_idx + 1) * (BATCH_SIZE)
        batch_data_pl, batch_label = get_batch(current_data_pl, current_label,
                                               start_idx, end_idx)
        mask_padded = batch_data_pl[:, :, 2] == 0

        feed_dict = {
            ops['pointclouds_pl']: batch_data_pl,
            ops['labels_pl']: batch_label,
            ops['mask_pl']: mask_padded.astype(float),
            ops['is_training']: is_training,
        }

        train_op = 'train_op'
        attention = 'attention'
        loss = 'loss'
        summary, step, _, loss, attention = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['attention']
        ],
                                                     feed_dict=feed_dict)

        #print(attention)
        train_writer.add_summary(summary, step)
        loss_sum += np.mean(loss)

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
Example 28
def get_split_data(category_list, modelnet_dir, split):
    data, labels = [], []
    for category, i in zip(category_list, range(len(category_list))):
        print(category)
        category_files_path = os.path.join(modelnet_dir,
                                           os.path.join(category, split))
        files = os.listdir(category_files_path)
        category_data = np.zeros((len(files), NUM_POINT, 3))
        category_label = np.full(len(files), i)
        for file, j in zip(files, range(len(files))):
            category_data[j] = sample_file(category_files_path, file)
        data.append(category_data)
        labels.append(category_label)
    data = np.concatenate(np.array(data), axis=0)
    labels = np.concatenate(labels, axis=0)
    shuffled_data, shuffled_labels, idx = provider.shuffle_data(data, labels)
    return shuffled_data, shuffled_labels
Example 29
def train_one_epoch(current_data, current_label, sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    is_training_base = True

    # Shuffle train files
    current_data, current_label, _ = provider.shuffle_data(
        current_data, np.squeeze(current_label))
    current_label = np.squeeze(current_label)

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        # Augment batched point clouds by rotation and jittering
        rotated_data, _ = provider.rotate_point_cloud(
            current_data[start_idx:end_idx, :, :])
        jittered_data = provider.jitter_point_cloud(rotated_data)
        feed_dict = {
            ops['pointclouds_pl']: jittered_data,
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['is_training_pl']: is_training,
            ops['is_training_base_pl']: is_training_base,
        }
        summary, step, _, loss_val, pred_val = sess.run([
            ops['merged'], ops['step'], ops['train_op'], ops['loss'],
            ops['pred']
        ],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 1)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += BATCH_SIZE
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
Example 30
 def forward(self, bottom, top):
     data = np.array(bottom[0].data)
     label = np.array(bottom[1].data)
     if self.is_seg:
         seg = np.array(bottom[2].data)
     # shuffle data
     aug_data, aug_label, order = provider.shuffle_data(
         data, np.squeeze(label))
     if self.is_seg:
         aug_seg = seg[order, ...]
     # Augment batched point clouds by rotation and jittering
     rotated_data = provider.rotate_point_cloud(aug_data)
     jittered_data = provider.jitter_point_cloud(rotated_data)
     # assign top data
     top[0].data[...] = jittered_data
     top[1].data[...] = aug_label
     if self.is_seg:
         top[2].data[...] = aug_seg
Example 31
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string('----')
    current_data, current_label, shuffled_idx = provider.shuffle_data(
        train_data[:, 0:NUM_POINT, :], train_group)
    current_sem = train_sem[shuffled_idx]

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    loss_sum = 0

    for batch_idx in range(num_batches):
        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d' %
                  (batch_idx, num_batches))
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx + 1) * BATCH_SIZE

        feed_dict = {
            ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
            ops['labels_pl']: current_label[start_idx:end_idx],
            ops['sem_labels_pl']: current_sem[start_idx:end_idx],
            ops['is_training_pl']: is_training,
        }
        summary, step, _, loss_val, sem_loss_val, disc_loss_val, l_var_val, l_dist_val, l_reg_val = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['sem_loss'], ops['disc_loss'], ops['l_var'], ops['l_dist'],
                ops['l_reg']
            ],
            feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        loss_sum += loss_val

        if batch_idx % 50 == 0:
            log_string(
                "loss: {:.2f}; sem_loss: {:.2f}; disc_loss: {:.2f}; l_var: {:.2f}; l_dist: {:.2f}; l_reg: {:.3f}."
                .format(loss_val, sem_loss_val, disc_loss_val, l_var_val,
                        l_dist_val, l_reg_val))

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
Example 32
def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True
    
    # Shuffle train files
    train_file_idxs = np.arange(0, len(TRAIN_FILES))
    np.random.shuffle(train_file_idxs)
    
    for fn in range(len(TRAIN_FILES)):
        log_string('----' + str(fn) + '-----')
        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
        current_data = current_data[:,0:NUM_POINT,:]
        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))            
        current_label = np.squeeze(current_label)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        
        total_correct = 0
        total_seen = 0
        loss_sum = 0
       
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            
            # Augment batched point clouds by rotation and jittering
            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
            jittered_data = provider.jitter_point_cloud(rotated_data)
            feed_dict = {ops['pointclouds_pl']: jittered_data,
                         ops['labels_pl']: current_label[start_idx:end_idx],
                         ops['is_training_pl']: is_training,}
            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
            train_writer.add_summary(summary, step)
            pred_val = np.argmax(pred_val, 1)
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            total_correct += correct
            total_seen += BATCH_SIZE
            loss_sum += loss_val
        
        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
        log_string('accuracy: %f' % (total_correct / float(total_seen)))