Example no. 1
def main():
    """Read .pkl file, parse its metadata, plot Gram matrix and save as pdf file with matplotlib.
    """
    filename = os.path.abspath(sys.argv[1])
    title = sys.argv[2] if len(sys.argv) > 2 else ""

    if filename[-4:] == ".pkl":
        dat = file_utils.load_pickle(filename)
        filename_pdf_ = filename.replace(".pkl", ".pdf")
    elif filename[-5:] == ".hdf5":
        dat = file_utils.load_hdf5(filename)
        filename_pdf_ = filename.replace(".hdf5", ".pdf")
    else:
        assert False
    dataset_type = dat['dataset_type']
    gram_matrices = dat['gram_matrices']
    sample_names = dat['sample_names']

    labels, separators, dataset_name, rotate = get_informations(
        dataset_type, sample_names)

    matrices = gram_matrices[-1]
    for key in matrices.keys():
        filename_pdf = filename_pdf_.replace(".pdf", "_" + key + ".pdf")
        plot_title = title + " " + key.replace("_", " ")
        plot_gram_to_pdf(filename_pdf,
                         matrices[key],
                         sample_names,
                         separators,
                         labels,
                         dataset_name,
                         title=plot_title,
                         rotate_vertically=rotate)
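
The suffix checks above (filename[-4:], filename[-5:]) work but are easy to get wrong. Purely as an illustration, not part of the original script, the same dispatch can be written with os.path.splitext:

import os

def output_pdf_path(filename):
    # Hypothetical helper, shown only as an alternative to the slicing above:
    # derive the .pdf output path from a .pkl or .hdf5 input path.
    base, ext = os.path.splitext(filename)
    if ext not in ('.pkl', '.hdf5'):
        raise ValueError('unsupported input file: ' + filename)
    return base + '.pdf'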
Example no. 2
def load_batch_generator(params):
    for step in range(0, params['max_train_steps']):
        # path where this step's pickle batch is stored
        pickle_path = os.path.join(params['train_pickle_dir'],
                                   str(step) + '.pickle')
        # load the pre-processed pickle file
        batch_data = load_pickle(pickle_path)
        yield batch_data
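
Every example in this list relies on a load_pickle helper whose definition is not shown. A minimal sketch of what such a helper typically looks like (an assumption; the project's file_utils/utils version may differ) is:

import pickle

def load_pickle(path):
    # Open in binary mode and unpickle the stored object.
    with open(path, 'rb') as f:
        return pickle.load(f)

The generator above is then consumed step by step inside the training loop, e.g. for batch_data in load_batch_generator(params): ...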
Example no. 3
def play_q_learning_model(level='level-0', model_path='./q_table.pkl'):
    q_model = QLearn()
    q_model.q_table = load_pickle(model_path)

    def ai_func(current_game_state):
        return q_model.pick_optimal_action(current_game_state, printing=False)

    game = Game(level, init_screen=True, ai_function=ai_func)
    game.run()
Example no. 4
def run_with_game_loop(level='level-0', model_path='./q_table.pkl'):

    q_model = QLearn()
    q_model.q_table = load_pickle(model_path)

    def ai_func(current_game_state):
        return q_model.pick_optimal_action(current_game_state)

    game = Game(level, init_screen=True, ai_function=ai_func)
    game.run()
Example no. 5
    def __getitem__(self, idx):
        rgb = Image.open(self.rgb_files[idx])
        rgb = self.transforms_rgb(rgb).float()
        rgb_pc = Image.open(self.rgb_files[idx])
        rgb_pc = self.transforms_rgb_pc(rgb_pc).float()

        depth = np.array(
            self.transforms_depth(Image.open(self.depth_files[idx]))) / 1000
        meta = load_pickle(training_data_dir + '/2-27-8_meta.pkl')

        intrinsic = meta["intrinsic"]
        z = depth
        v, u = np.indices(z.shape)
        uv1 = np.stack([u + 0.5, v + 0.5, np.ones_like(z)], axis=-1)
        points_viewer = uv1 @ np.linalg.inv(intrinsic).T * z[..., None]  # [H, W, 3]
        xyz = torch.from_numpy(points_viewer).float().permute(2, 0, 1)
        pc = torch.cat([rgb_pc.view([3, -1]), xyz.view([3, -1])], dim=0)

        return pc, rgb, self.instance_ids[idx]
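
The depth-to-point-cloud step above is a standard pinhole back-projection: a pixel at (u, v) with depth z maps to z * K^{-1} * [u + 0.5, v + 0.5, 1]^T. A small self-contained check with a made-up intrinsic matrix (not one from this dataset) shows the shapes involved:

import numpy as np

# Toy intrinsic matrix and a constant 2 m depth map, both invented for the check.
intrinsic = np.array([[500.0,   0.0, 32.0],
                      [  0.0, 500.0, 24.0],
                      [  0.0,   0.0,  1.0]])
z = np.full((48, 64), 2.0)
v, u = np.indices(z.shape)
uv1 = np.stack([u + 0.5, v + 0.5, np.ones_like(z)], axis=-1)       # [H, W, 3]
points_viewer = uv1 @ np.linalg.inv(intrinsic).T * z[..., None]    # [H, W, 3]
assert points_viewer.shape == (48, 64, 3)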
Example no. 6
def main():
    """Read .mat file and plot Gram matrix to html with plotly.
    """
    filename = sys.argv[1]    
    if filename[-4:] == ".pkl":
        dat = file_utils.load_pickle(filename)
        filename_html_ = filename.replace(".pkl", ".html")
    elif filename[-5:] == ".hdf5":
        dat = file_utils.load_hdf5(filename)
        filename_html_ = filename.replace(".hdf5", ".html")
    else:
        assert False
    dataset_type = dat['dataset_type']
    gram_matrices = dat['gram_matrices']
    sample_names = dat['sample_names']

    matrices = gram_matrices[-1]
    for key in matrices.keys():
        filename_html = filename_html_.replace(".html", "_" + key + ".html")
        plot_gram_to_html(filename_html, matrices[key], sample_names)
Example no. 7
    def __getitem__(self, idx):
        rgb = Image.open(self.rgb_files[idx])
        rgb = self.transforms_rgb(rgb).float()
        rgb_pc = Image.open(self.rgb_files[idx])
        rgb_pc = self.transforms_rgb_pc(rgb_pc).float()

        depth = np.array(
            self.transforms_depth(Image.open(self.depth_files[idx]))) / 1000
        meta = load_pickle(self.meta_files[idx])

        intrinsic = meta["intrinsic"]
        z = depth
        v, u = np.indices(z.shape)
        uv1 = np.stack([u + 0.5, v + 0.5, np.ones_like(z)], axis=-1)
        points_viewer = uv1 @ np.linalg.inv(intrinsic).T * z[..., None]  # [H, W, 3]
        xyz = torch.from_numpy(points_viewer).float().permute(2, 0, 1)
        pc = torch.cat([rgb_pc.view([3, -1]), xyz.view([3, -1])], dim=0)
        pc = (pc - pc.mean(dim=0)) / pc.std(dim=0)

        label = Image.open(self.label_files[idx])
        label = (self.transforms_label(label) * 255).long().squeeze(dim=0)

        return pc, rgb, label, self.instance_ids[idx]
Example no. 8
for rgb_file, depth_file, label_file, meta_file in zip(rgb_files, depth_files,
                                                       label_files,
                                                       meta_files):

    instance_name = parse("{}/v2.2/{}_meta.pkl", meta_file)[1]

    print('processing #%d %s' % (counter, instance_name))
    counter += 1
    testing_pc_file = testing_pc_root + '/%s.ply' % instance_name
    testing_pc = get_pc_from_image_files(rgb_file, depth_file, meta_file)
    #if not os.path.exists(testing_pc_file):
    #    print(np.asarray(testing_pc.points).shape)
    #    o3d.io.write_point_cloud(testing_pc_file, testing_pc)
    #else:
    #    testing_pc = o3d.io.read_point_cloud(testing_pc_file)

    label = np.array(Image.open(label_file))
    meta = load_pickle(meta_file)

    scales = meta['scales']
    object_ids = meta['object_ids']
    ans_scene = {}
    poses = []
    for object_id, scale in enumerate(scales):
        if scale is None:
            poses.append(None)
            continue

        scale = scales[object_id]
        point_idx = label.flatten() == object_id
        testing_pts = np.asarray(testing_pc.points)
        testing_colors = np.asarray(testing_pc.colors)
Example no. 9
#testing_pc_root = data_root_dir + '/testing_pc'
#rgb_files, depth_files, label_files, meta_files = get_data_files(testing_data_dir,
#                                                                 target_levels=(1,2))
testing_pcs = []

ans = {}
counter = 0
object_counter = 0
correct_object_counter = 0

num_processes = 16
num_initial_poses = 256
pool = Pool(num_processes)
target_instance_name = '2-224-11'
target_object_id = 76
Rs = load_pickle(data_root_dir + '/sym_Rs.pkl')[-1]
#rgb_files = rgb_files[457:]
#depth_files = depth_files[457:]
#label_files = label_files[457:]
#meta_files = meta_files[457:]
for rgb_file, depth_file, label_file, meta_file in zip(rgb_files, depth_files,
                                                       label_files,
                                                       meta_files):
    start_time = time.time()
    instance_name = parse("{}/v2.2/{}_meta.pkl", meta_file)[1]
    #if instance_name!=target_instance_name:
    #    continue
    #print('processing #%d %s'%(counter, instance_name))
    counter += 1
    #if(counter>1):
    #    break
Example no. 10
def run(pickle_or_hdf5_location, dataset_location, fold_count, fold_to_drop,
        algorithm, params, output_dir, output_filename_format, output_file):
    ########
    # Create output directory and backup the configuration file to the directory
    ########
    os.makedirs(output_dir, exist_ok=True)
    try:
        shutil.copy(os.path.abspath(sys.argv[2]),
                    os.path.join(output_dir, os.path.basename(sys.argv[2])))
    except shutil.SameFileError:
        pass
    hdf5 = pickle_or_hdf5_location[-4:] == "hdf5"
    check_fold(fold_count, fold_to_drop, hdf5)
    check_algorithm(algorithm)
    check_params(algorithm, params)

    pickle_or_hdf5_location = os.path.abspath(pickle_or_hdf5_location)
    dataset_location = os.path.abspath(dataset_location)
    output_dir = os.path.abspath(output_dir)
    assert os.path.isdir(output_dir)
    assert os.path.exists(pickle_or_hdf5_location)

    ########
    # Load complete GRAM matrix
    ########
    time_main_start = os.times()

    hdf5 = pickle_or_hdf5_location[-4:] == "hdf5"
    if hdf5:
        loaded_data = file_utils.load_hdf5(pickle_or_hdf5_location)
    else:
        loaded_data = file_utils.load_pickle(pickle_or_hdf5_location)
        check_pickle_format(loaded_data)

    dataset_type = loaded_data['dataset_type']
    if dataset_type == 'UCIauslan':
        loaded_sample_names = loaded_data['sample_names']
    else:
        loaded_sample_names = [
            s.split('/')[-1].split('.')[0] for s in loaded_data['sample_names']
        ]
    gram_matrices = loaded_data['gram_matrices']
    if len(gram_matrices) == 1:
        gram = gram_matrices[0]['original']
    else:
        gram = gram_matrices[-1]['completed_npsd']

    # drop elements
    if fold_count == 0:
        # nothing is dropped when folds are not used
        gram_drop = gram
        indices_to_drop = []
        dropped_elements = []
    else:
        folds = k_fold_cross_validation.get_kfolds(dataset_type,
                                                   loaded_sample_names,
                                                   fold_count)
        indices_to_drop = folds[fold_to_drop - 1]
        gram_drop, dropped_elements = make_matrix_incomplete.gram_drop_samples(
            gram, indices_to_drop)

    ########
    # Prepare time-series data
    ########
    seqs, sample_names, labels_str, _ = read_sequences(dataset_type,
                                                       dataset_location)

    seqs = filter_samples(seqs, sample_names, loaded_sample_names)
    labels_str = filter_samples(labels_str, sample_names, loaded_sample_names)

    ########
    # Execute Matrix Completion
    ########
    train_start = None
    train_end = None
    if algorithm == "gak":
        ########
        # Baseline GAK
        ########
        gram_completed, time_completion_start, time_completion_end \
            = matrix_completion.gak_matrix_completion(
                gram_drop, seqs, indices_to_drop,
                sigma=params['sigma'], triangular=params['triangular'])
        action = "GAK sigma: " + str(params['sigma']) + " triangular: " + str(
            params['triangular'])
        output_filename_format = output_filename_format.replace(
            "${sigma}",
            str(params['sigma'])).replace("${triangular}",
                                          str(params['triangular']))
    elif algorithm in {"softimpute", "knn", "iterativesvd"}:
        ########
        # Baseline SoftImpute, KNN, IterativeSVD
        ########
        if algorithm == "softimpute":
            func = matrix_completion.softimpute_matrix_completion
            action = "Softimpute"
            print('running SoftImpute')
        elif algorithm == "knn":
            func = matrix_completion.knn_matrix_completion
            action = "KNN"
            print('running KNN')
        elif algorithm == "iterativesvd":
            func = matrix_completion.iterativesvd_matrix_completion
            action = "IterativeSVD"
            print('running IterativeSVD')
        else:
            print("unsupported fancyimpute algorithm")
            exit(-1)
        flag_test = np.zeros(len(seqs))
        flag_test[indices_to_drop] = 1
        drop_flag_matrix = create_true_GAK_flag_matrix(1 - params['gak_rate'],
                                                       flag_test)
        for i in range(len(seqs)):
            drop_flag_matrix[i, i] = 1
            for j in range(i + 1):
                if i not in indices_to_drop and j not in indices_to_drop:
                    drop_flag_matrix[i, j] = 1
                    drop_flag_matrix[j, i] = 1

        print(len(seqs)**2)
        print(np.count_nonzero(drop_flag_matrix))
        gram_completed, time_completion_start, time_completion_end \
            = func(gram_drop,
                   seqs,
                   sigma=params['sigma'],
                   triangular=params['triangular'],
                   num_process=params['num_process'],
                   drop_flag_matrix=drop_flag_matrix)
    elif algorithm == "rnn":
        ########
        # Our Scheme, Siamese Recurrent Neural Network
        ########
        modelfile_hdf5 = os.path.join(output_dir,
                                      output_filename_format + "_model.hdf5")
        logfile_loss = os.path.join(output_dir,
                                    output_filename_format + ".losses")
        gram_completed, time_train_start, time_train_end, \
            time_completion_start, time_completion_end \
            = matrix_completion.rnn_matrix_completion(
                gram_drop,
                seqs,
                params['epochs'],
                params['patience'],
                params['epoch_start_from'],
                logfile_loss,
                modelfile_hdf5,
                params['rnn'],
                params['rnn_units'],
                params['dense_units'],
                params['dropout'],
                params['implementation'],
                params['bidirectional'],
                params['batchnormalization'],
                params['mode'],
                params['loss_function'],
                params['loss_weight_ratio'],
                labels_str,
                params['siamese_joint_method'],
                params['siamese_arms_activation'],
                trained_modelfile_hdf5=params['trained_modelfile_hdf5'])
        action = "SiameseRNN"
    elif algorithm == "fast_rnn":
        ########
        # Our Scheme, Fast Siamese Recurrent Neural Network
        ########
        modelfile_hdf5 = os.path.join(output_dir,
                                      output_filename_format + "_model.hdf5")
        logfile_loss = os.path.join(output_dir,
                                    output_filename_format + ".losses")
        gram_completed, time_completion_start, time_completion_end \
            = matrix_completion.fast_rnn_matrix_completion(
                gram_drop,
                seqs,
                params['rnn'],
                params['rnn_units'],
                params['dense_units'],
                params['dropout'],
                params['implementation'],
                params['bidirectional'],
                params['batchnormalization'],
                params['loss_function'],
                params['siamese_arms_activation'],
                params['siamese_joint_method'],
                trained_modelfile_hdf5=params['trained_modelfile_hdf5'])
        action = "FastSiameseRNN"
    else:
        assert False

    ########
    # Make the completed matrix positive semidefinite, if it is not.
    ########

    # eigenvalue check
    time_npsd_start = os.times()
    gram_completed_npsd = nearest_positive_semidefinite.nearest_positive_semidefinite(
        gram_completed)
    time_npsd_end = os.times()

    ########
    # Save results
    ########
    if hdf5:
        log_file = os.path.join(output_dir, output_filename_format + ".hdf5")
    else:
        log_file = os.path.join(output_dir, output_filename_format + ".pkl")
    action += " " + time.asctime(time.localtime())
    file_utils.append_and_save_result(log_file,
                                      loaded_data,
                                      gram_drop,
                                      gram_completed,
                                      gram_completed_npsd,
                                      indices_to_drop,
                                      action,
                                      hdf5=hdf5)

    # calculate errors
    mse, mse_dropped, mae, mae_dropped, \
        relative, relative_dropped = calculate_errors(gram, gram_completed_npsd, dropped_elements)

    time_main_end = os.times()

    # save run times and errors
    num_calculated_elements = len(dropped_elements) - len(indices_to_drop) // 2
    num_dropped_sequences = len(indices_to_drop)
    out_path = os.path.join(output_dir, output_file)
    file_utils.save_analysis(out_path, len(dropped_elements),
                             num_dropped_sequences, num_calculated_elements,
                             time_completion_start, time_completion_end,
                             time_npsd_start, time_npsd_end, time_main_start,
                             time_main_end, mse, mse_dropped, mae, mae_dropped,
                             relative, relative_dropped)
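
The implementation of nearest_positive_semidefinite called above is not shown in these snippets. A common way to obtain a nearby positive semidefinite matrix is to symmetrize and clip negative eigenvalues; the sketch below only illustrates that idea and is not the project's actual routine:

import numpy as np

def nearest_psd_sketch(gram):
    # Symmetrize, then zero out negative eigenvalues (illustrative only).
    sym = (gram + gram.T) / 2.0
    eigvals, eigvecs = np.linalg.eigh(sym)
    return eigvecs @ np.diag(np.clip(eigvals, 0.0, None)) @ eigvecs.T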
Example no. 11
def run(pickle_or_hdf5_location, dataset_location, fold_to_test, fold_to_tv,
        fold_count, params,
        output_dir, output_filename_format, data_augmentation_size):
    os.makedirs(output_dir, exist_ok=True)
    shutil.copy(os.path.abspath(sys.argv[2]), os.path.join(output_dir, os.path.basename(sys.argv[2])))
    hdf5 = pickle_or_hdf5_location[-4:] == "hdf5"
    if hdf5:
        loaded_data = file_utils.load_hdf5(os.path.abspath(pickle_or_hdf5_location))
    else:
        loaded_data = file_utils.load_pickle(os.path.abspath(pickle_or_hdf5_location))

    dataset_type = loaded_data['dataset_type']
    sample_names = [s.split('/')[-1].split('.')[0] for s in loaded_data['sample_names']]

    gram_matrices = loaded_data['gram_matrices']
    gram = gram_matrices[0]['original']
    
    sample_names = loaded_data['sample_names']
    
    folds = k_fold_cross_validation.get_kfolds(dataset_type, sample_names, fold_count)
    folds = np.array(folds)
    test_indices = np.concatenate(folds[fold_to_test])
    tv_indices = np.concatenate(folds[fold_to_tv])
    fold_for_gram = np.delete(np.arange(fold_count), fold_to_test + fold_to_tv)
    gram_indices = np.concatenate(folds[fold_for_gram]).astype(int)
    
    seqs, key_to_str, _ = read_sequences(dataset_type, dataset_location)
    augmentation_magnification = 1.2
    seqs, key_to_str, flag_augmented = augment_data(seqs, key_to_str,
                                                    augmentation_magnification,
                                                    rand_uniform=True,
                                                    num_normaldist_ave=data_augmentation_size - 2)

    
    seqs = filter_samples(seqs, sample_names)
    key_to_str = filter_samples(key_to_str, sample_names)

    logfile_hdf5 = os.path.join(output_dir, output_filename_format + "_model.hdf5")
    logfile_loss = os.path.join(output_dir, output_filename_format + ".losses")
    output_file  = os.path.join(output_dir, output_filename_format + ".json")
    
    (roc_auc_score, f1_score) = KSS_unsupervised_alpha_prediction.get_classification_error(
        gram,
        gram_indices,
        tv_indices,
        test_indices,
        list(seqs.values()),
        params['epochs'],
        params['patience'],
        logfile_hdf5,
        logfile_loss,
        params['rnn'],
        params['rnn_units'],
        params['dense_units'],
        params['dropout'],
        params['implementation'],
        params['bidirectional'],
        params['batchnormalization'],
        params['mode'],
        list(key_to_str.values()),
        params['lmbd'],
        params['top_activation'])

    print(pickle_or_hdf5_location + " roc_auc_score: " + str(roc_auc_score) + " f1_score: " + str(f1_score))
    dic = dict(roc_auc_score=roc_auc_score,
               f1_score=f1_score)
    
    file_utils.save_json(output_file, dic)
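
file_utils.save_json is likewise not defined in these snippets; a minimal stand-in consistent with how it is called here (an assumption about its behaviour) would be:

import json

def save_json(path, dic):
    # Hypothetical stand-in; the real helper may choose a different format.
    with open(path, 'w') as f:
        json.dump(dic, f, indent=2)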
Example no. 12
from pointnet.datasets import PoseTrainingDataset, PoseTestingDataset
from utils.file_utils import training_data_dir, testing_data_dir, load_pickle, \
    data_root_dir, testing_data_root, root_dir
from torch.utils.data import DataLoader, random_split
from pointnet.loss import PoseLoss
from pointnet.model import PointNetRot6d, FCNet, PointNetCls, PointNetRot9d, PointNetRot6d_Wide
from benchmark_utils.pose_evaluator import PoseEvaluator
import time
from tqdm import tqdm
from pointnet.prepare_data import unbatch_prediction
import json
import os
#'''
csv_path = testing_data_root + '/objects_v1.csv'
pose_evaluator = PoseEvaluator(csv_path)
object_names = load_pickle(data_root_dir + '/object_names.pkl')


def eval(pred, gt, object_id):
    R_pred = pred[:3, :3]
    t_pred = pred[:3, 3]
    R_gt = gt[:3, :3]
    t_gt = gt[:3, 3]
    object_name = object_names[object_id]
    result = pose_evaluator.evaluate(object_name, R_pred, R_gt, t_pred, t_gt,
                                     np.ones(3))
    return result['rre_symmetry'], result['pts_err']


num_epochs = 10000
batch_size = 256
Example no. 13
    def __init__(self, data_path):
        super(PoseTrainingDataset, self).__init__()
        self.pc_files = load_pickle(data_path + '/pc_filenames.pkl')
        self.gt_poses = load_pickle(data_path + '/gt_poses.pkl')
        self.model_ids = load_pickle(data_path + '/model_ids.pkl')
        self.pc_means = load_pickle(data_path + '/pc_means.pkl')
Example no. 14
def run(pickle_or_hdf5_location, dataset_location, fold_count, fold_to_drop,
        params, output_dir, output_filename_format, output_file,
        data_augmentation_size):
    os.makedirs(output_dir, exist_ok=True)
    try:
        shutil.copy(os.path.abspath(sys.argv[2]),
                    os.path.join(output_dir, os.path.basename(sys.argv[2])))
    except shutil.SameFileError:
        pass
    hdf5 = pickle_or_hdf5_location[-4:] == "hdf5"
    check_fold(fold_count, fold_to_drop, hdf5)

    pickle_or_hdf5_location = os.path.abspath(pickle_or_hdf5_location)
    dataset_location = os.path.abspath(dataset_location)
    output_dir = os.path.abspath(output_dir)
    assert os.path.isdir(output_dir)
    assert os.path.exists(pickle_or_hdf5_location)

    main_start = os.times()

    hdf5 = pickle_or_hdf5_location[-4:] == "hdf5"
    if hdf5:
        loaded_data = file_utils.load_hdf5(pickle_or_hdf5_location)
    else:
        loaded_data = file_utils.load_pickle(pickle_or_hdf5_location)

    dataset_type = loaded_data['dataset_type']
    if dataset_type == 'UCIauslan':
        loaded_sample_names = loaded_data['sample_names']
    else:
        loaded_sample_names = [
            s.split('/')[-1].split('.')[0] for s in loaded_data['sample_names']
        ]
    gram_matrices = loaded_data['gram_matrices']
    if len(gram_matrices) == 1:
        gram = gram_matrices[0]['original']
    else:
        gram = gram_matrices[-1]['completed_npsd']

    # drop elements
    if fold_count == 0:
        # nothing is dropped when folds are not used
        gram_drop = gram
        indices_to_drop = []
    else:
        folds = k_fold_cross_validation.get_kfolds(dataset_type,
                                                   loaded_sample_names,
                                                   fold_count)
        indices_to_drop = folds[fold_to_drop - 1]
        gram_drop, dropped_elements = make_matrix_incomplete.gram_drop_samples(
            gram, indices_to_drop)

    seqs, sample_names, labels_str, _ = read_sequences(dataset_type,
                                                       dataset_location)

    seqs = filter_samples(seqs, sample_names, loaded_sample_names)
    labels_str = filter_samples(labels_str, sample_names, loaded_sample_names)

    train_start = None
    train_end = None

    modelfile_hdf5 = os.path.join(output_dir,
                                  output_filename_format + "_model.hdf5")
    logfile_loss = os.path.join(output_dir, output_filename_format + ".losses")

    # pre-processing
    num_seqs = len(seqs)
    time_dim = max([seq.shape[0] for seq in seqs])
    pad_value = -4444
    seqs = pad_sequences([seq.tolist() for seq in seqs],
                         maxlen=time_dim,
                         dtype='float32',
                         padding='post',
                         value=pad_value)
    feat_dim = seqs[0].shape[1]
    input_shape = (time_dim, feat_dim)

    K.clear_session()

    # build network
    model = siamese_rnn_branch.SiameseRnnBranch(
        input_shape,
        pad_value,
        params['rnn_units'],
        params['dense_units'],
        params['rnn'],
        params['dropout'],
        params['implementation'],
        params['bidirectional'],
        params['batchnormalization'],
        params['loss_function'],
        params['siamese_joint_method'],
        params['trained_modelfile_hdf5'],
        siamese_arms_activation=params['siamese_arms_activation'])

    test_indices = indices_to_drop
    train_validation_indices = np.delete(np.arange(len(seqs)), test_indices)

    train_validation_seqs = seqs[train_validation_indices]
    test_seqs = seqs[test_indices]

    train_validation_features = model.predict(train_validation_seqs)

    time_pred_start = os.times()
    test_features = model.predict(test_seqs)
    time_pred_end = os.times()

    labels = np.array(labels_str)
    train_validation_labels = labels[train_validation_indices]
    test_labels = labels[test_indices]


    auc, f1, time_classification_start, time_classification_end = \
                    linear_svm.compute_classification_errors(train_validation_features,
                                                             train_validation_labels,
                                                             test_features,
                                                             test_labels)

    main_end = os.times()

    num_calculated_sequences = len(test_seqs)

    virtual_prediction_duration = time_pred_end.user - time_pred_start.user + time_pred_end.system - time_pred_start.system
    elapsed_prediction_duration = time_pred_end.elapsed - time_pred_start.elapsed

    virtual_classification_duration = time_classification_end.user - time_classification_start.user + time_classification_end.system - time_classification_start.system
    elapsed_classification_duration = time_classification_end.elapsed - time_classification_start.elapsed

    prediction = {}

    prediction['basics'] = {}
    prediction['basics']['number_of_calculated_sequences'] = len(test_seqs)

    prediction['all'] = {}
    prediction['all'][
        'virtual_prediction_duration'] = virtual_prediction_duration
    prediction['all'][
        'elapsed_prediction_duration'] = elapsed_prediction_duration

    prediction['each_seq'] = {}
    prediction['each_seq'][
        'virtual_prediction_duration_per_calculated_sequence'] = virtual_prediction_duration / num_calculated_sequences
    prediction['each_seq'][
        'elapsed_prediction_duration_per_calculated_sequence'] = elapsed_prediction_duration / num_calculated_sequences

    classification = {}

    classification['basics'] = {}
    classification['basics']['roc_auc'] = auc
    classification['basics']['f1'] = f1

    classification['all'] = {}
    classification['all'][
        'virtual_classification_duration'] = virtual_classification_duration
    classification['all'][
        'elapsed_classification_duration'] = elapsed_classification_duration

    classification['each_seq'] = {}
    classification['each_seq'][
        'virtual_classification_duration_per_calculated_sequence'] = virtual_classification_duration / num_calculated_sequences
    classification['each_seq'][
        'elapsed_classification_duration_per_calculated_sequence'] = elapsed_classification_duration / num_calculated_sequences

    dic = dict(prediction=prediction, classification=classification)

    ###
    lsvm_out_path = os.path.join(output_dir, output_file)
    file_utils.save_json(lsvm_out_path, dic)
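
The duration bookkeeping in this example differences the fields of os.times(): user plus system time gives the "virtual" (CPU) duration, while the elapsed field gives wall-clock time. A self-contained illustration of the same pattern:

import os
import time

t_start = os.times()
time.sleep(0.1)                      # stand-in for the work being timed
t_end = os.times()
virtual = (t_end.user - t_start.user) + (t_end.system - t_start.system)
elapsed = t_end.elapsed - t_start.elapsed
print(virtual, elapsed)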