# Read the training set train_images_filenames, train_labels = io.load_training_set() print('Loaded {} train images.'.format(len(train_images_filenames))) # Feature extraction with sift print('Obtaining sift features...') D, L, _, _ = feature_extraction.parallel_sift(train_images_filenames, train_labels, n_jobs=N_JOBS) print('Time spend: {:.2f} s'.format(time.time() - start)) temp = time.time() # Train Linear SVM classifier print('Training the SVM classifier...') lin_svm, std_scaler, pca = classification.train_rbf_svm( D, L, model_name='final_noprob_sift_all_svm') print('Time spend: {:.2f} s'.format(time.time() - temp)) temp = time.time() # Read the test set test_images_filenames, test_labels = io.load_test_set() print('Loaded {} test images.'.format(len(test_images_filenames))) # Feature extraction with sift, prediction with SVM and aggregation to obtain final class print('Predicting test data...') result = joblib.Parallel(n_jobs=N_JOBS, backend='threading')( joblib.delayed(parallel_testing)(test_image, test_label, lin_svm, std_scaler, None) for test_image, test_label in zip(test_images_filenames, test_labels)) correct_class = [i[0] for i in result]
import mlcv.feature_extraction as feature_extraction
import mlcv.input_output as io
from scripts import SESSION1

# NOTE(review): `time` and `classification` are used below but not imported in
# this visible chunk — confirm the missing imports exist in the full file.

if __name__ == '__main__':
    start = time.time()

    # Read the training set
    train_images_filenames, train_labels = io.load_training_set()
    print('Loaded {} train images.'.format(len(train_images_filenames)))

    # Feature extraction with SIFT (default parallelism — no n_jobs passed here)
    print('Obtaining sift features...')
    D, L, _, _ = feature_extraction.parallel_sift(train_images_filenames, train_labels)
    print('Time spend: {:.2f} s'.format(time.time() - start))
    temp = time.time()

    # Train the SVM classifier (RBF kernel); model/scaler/PCA artifact names come
    # from the SESSION1 configuration mapping.
    print('Training the SVM classifier...')
    svm, std_scaler, pca = classification.train_rbf_svm(
        D, L, model_name=SESSION1['model'], save_scaler=SESSION1['scaler'], save_pca=SESSION1['pca'])
    print('Time spend: {:.2f} s'.format(time.time() - temp))
    temp = time.time()

    # Training-only script: report the end-to-end wall-clock time and finish.
    print('\nTOTAL TRAINING TIME: {:.2f} s'.format(time.time() - start))
start = time.time() # Read the training set train_images_filenames, train_labels = io.load_training_set() print('Loaded {} train images.'.format(len(train_images_filenames))) # Feature extraction with surf print('Obtaining surf features...') D, L, _ = feature_extraction.parallel_surf(train_images_filenames, train_labels) print('Time spend: {:.2f} s'.format(time.time() - start)) temp = time.time() # Train Linear SVM classifier print('Training the SVM classifier...') lin_svm, std_scaler, pca = classification.train_rbf_svm( D, L, C=5, gamma=0.1, model_name='final_surf_30_svm') print('Time spend: {:.2f} s'.format(time.time() - temp)) temp = time.time() # Read the test set test_images_filenames, test_labels = io.load_test_set() print('Loaded {} test images.'.format(len(test_images_filenames))) # Feature extraction with surf, prediction with SVM and aggregation to obtain final class print('Predicting test data...') result = joblib.Parallel(n_jobs=N_JOBS, backend='threading')( joblib.delayed(parallel_testing)(test_image, test_label, lin_svm, std_scaler, None) for test_image, test_label in zip(test_images_filenames, test_labels))
# --- Hyperparameter sweep over (p1, p2) for the selected SVM kernel. ---
# `sweep_mode` selects what is swept: 'cost' sweeps the C parameter (p1) with a
# fixed PCA reduction to 23 dims; 'params' sweeps kernel-specific parameters
# (degree/coef0 for poly, gamma for rbf).
# NOTE(review): `sw1`, `sw2`, `sweep_mode`, `kernel`, `D`, `L` and `temp` are
# defined above this visible chunk, and the params-mode elif chain is cut off
# at the end of the chunk — indentation below is reconstructed; confirm against
# the original file.
print('Sweeping ' + sweep_mode + ' for kernel type ' + kernel + '...')
for p1 in sw1:
    for p2 in sw2:
        print('p1 value ' + str(p1) + ' and p2 value ' + str(p2))
        # Reset per-iteration so stale scaler/PCA from a previous run never leaks through
        std_scaler = None
        pca = None
        if sweep_mode == 'cost':
            # p1 is the SVM cost C; every branch reduces to 23 dims with PCA.
            if kernel == 'linear':
                svm, std_scaler, pca = classification.train_linear_svm(
                    D, L, p1, dim_reduction=23)
            elif kernel == 'poly':
                svm, std_scaler, pca = classification.train_poly_svm(
                    D, L, p1, dim_reduction=23)
            elif kernel == 'rbf':
                svm, std_scaler, pca = classification.train_rbf_svm(
                    D, L, p1, dim_reduction=23)
            elif kernel == 'sigmoid':
                svm, std_scaler, pca = classification.train_sigmoid_svm(
                    D, L, p1, dim_reduction=23)
            else:
                # Unknown kernel name: fall back to a linear SVM
                svm, std_scaler, pca = classification.train_linear_svm(
                    D, L, p1, dim_reduction=23)
            print('Time spend: {:.2f} s'.format(time.time() - temp))
            temp = time.time()
        elif sweep_mode == 'params':
            # p1/p2 are kernel-specific parameters rather than the cost C.
            if kernel == 'poly':
                svm, std_scaler, pca = classification.train_poly_svm(
                    D, L, degree=p1, coef0=p2)
            elif kernel == 'rbf':
                # Only gamma is swept for rbf; p2 is unused in this branch.
                svm, std_scaler, pca = classification.train_rbf_svm(
                    D, L, gamma=p1)
# --- Bag-of-Visual-Words pipeline: dense codebook -> visual words -> RBF SVM -> test. ---
# NOTE(review): this chunk starts mid-script; `start`, `K`, `D`, `L`, `I`,
# `bovw`, `classification`, `io`, `joblib`, `parallel_testing` and `N_JOBS`
# are bound above the visible region.
print('Elapsed time: {:.2f} s'.format(time.time() - start))
temp = time.time()

# Build the visual-word codebook (K words) from the descriptor matrix D
print('Creating codebook with {} visual words'.format(K))
codebook = bovw.create_codebook(D, codebook_name='dense_codebook')
print('Elapsed time: {:.2f} s'.format(time.time() - temp))
temp = time.time()

# Quantise training descriptors into per-image visual-word histograms
print('Getting visual words from training set...')
vis_words, labels = bovw.visual_words(D, L, I, codebook)
print('Elapsed time: {:.2f} s'.format(time.time() - temp))
temp = time.time()

# Train the SVM classifier (RBF kernel, tuned C/gamma; the `lin_svm` name is
# historical). dim_reduction=None disables PCA, so `pca` is expected to be None.
print('Training the SVM classifier...')
lin_svm, std_scaler, pca = classification.train_rbf_svm(vis_words, labels, C=7.76, gamma=0.0013808,
                                                        dim_reduction=None)
print('Elapsed time: {:.2f} s'.format(time.time() - temp))
temp = time.time()

# Read the test set
test_images_filenames, test_labels = io.load_test_set()
print('Loaded {} test images.'.format(len(test_images_filenames)))

# Per-image: extract features, map to visual words via the codebook, predict
# with the SVM and aggregate to a final class.
print('Predicting test data...')
test_results = joblib.Parallel(n_jobs=N_JOBS, backend='threading')(
    joblib.delayed(parallel_testing)(test_image, test_label, codebook, lin_svm, std_scaler, pca)
    for test_image, test_label in zip(test_images_filenames, test_labels))
# First element of each result tuple is the prediction outcome for that image
pred_results = [x[0] for x in test_results]
# --- BoVW pipeline variant: 256-word dense codebook with its own tuned C/gamma. ---
# Structurally identical to the other BoVW script in this file; only the
# codebook name and SVM hyperparameters differ.
# NOTE(review): `start`, `K`, `D`, `L`, `I` and the module names are bound
# above this visible chunk.
print('Elapsed time: {:.2f} s'.format(time.time() - start))
temp = time.time()

# Build the visual-word codebook from the descriptor matrix D
print('Creating codebook with {} visual words'.format(K))
codebook = bovw.create_codebook(D, codebook_name='codebook_256_dense')
print('Elapsed time: {:.2f} s'.format(time.time() - temp))
temp = time.time()

# Quantise training descriptors into per-image visual-word histograms
print('Getting visual words from training set...')
vis_words, labels = bovw.visual_words(D, L, I, codebook)
print('Elapsed time: {:.2f} s'.format(time.time() - temp))
temp = time.time()

# Train the SVM classifier (RBF kernel; `lin_svm` name is historical).
# dim_reduction=None disables PCA.
print('Training the SVM classifier...')
lin_svm, std_scaler, pca = classification.train_rbf_svm(vis_words, labels, C=1.65, gamma=0.005926,
                                                        dim_reduction=None)
print('Elapsed time: {:.2f} s'.format(time.time() - temp))
temp = time.time()

# Read the test set
test_images_filenames, test_labels = io.load_test_set()
print('Loaded {} test images.'.format(len(test_images_filenames)))

# Per-image feature extraction, codebook quantisation, SVM prediction and
# aggregation to a final class, parallelised over N_JOBS threads.
print('Predicting test data...')
test_results = joblib.Parallel(n_jobs=N_JOBS, backend='threading')(
    joblib.delayed(parallel_testing)(test_image, test_label, codebook, lin_svm, std_scaler, pca)
    for test_image, test_label in zip(test_images_filenames, test_labels))
# First element of each result tuple is the prediction outcome for that image
pred_results = [x[0] for x in test_results]
# Read the training set train_images_filenames, train_labels = io.load_training_set() print('Loaded {} train images.'.format(len(train_images_filenames))) # Feature extraction with sift print('Obtaining sift features...') D, L, _, _ = feature_extraction.parallel_sift(train_images_filenames, train_labels, n_jobs=N_JOBS) print('Time spend: {:.2f} s'.format(time.time() - start)) temp = time.time() # Train Linear SVM classifier print('Training the SVM classifier...') lin_svm, std_scaler, pca = classification.train_rbf_svm( D, L, dim_reduction=23, model_name='final_sift_all_svmPCA') print('Time spend: {:.2f} s'.format(time.time() - temp)) temp = time.time() # Read the test set test_images_filenames, test_labels = io.load_test_set() print('Loaded {} test images.'.format(len(test_images_filenames))) # Feature extraction with sift, prediction with SVM and aggregation to obtain final class print('Predicting test data...') result = joblib.Parallel(n_jobs=N_JOBS, backend='threading')( joblib.delayed(parallel_testing)(test_image, test_label, lin_svm, std_scaler, pca) for test_image, test_label in zip(test_images_filenames, test_labels))
# Feature extraction print('Obtaining features...') D, L, _ = train_function(train_images_filenames, train_labels, num_samples_class=num_samples, n_jobs=N_JOBS) print('Time spend: {:.2f} s'.format(time.time() - start)) temp = time.time() # Train Linear SVM classifier print('Training the SVM with RBF kernel classifier...') svm, std_scaler, pca = classification.train_rbf_svm( D, L, C=5, gamma=0.1, dim_reduction=dim_red_option, model_name='svm_{}_{}s_{}c'.format( fe_name, num_samples if num_samples > -1 else 'all', dim_red_option if dim_red_option is not None else 'all')) print('Time spend: {:.2f} s'.format(time.time() - temp)) temp = time.time() # Feature extraction with sift, prediction with SVM and aggregation to obtain final class print('Predicting test data...') result = joblib.Parallel(n_jobs=N_JOBS, backend='threading')( joblib.delayed(predict_function)(test_image, test_label, svm, std_scaler, pca) for test_image, test_label in zip(test_images_filenames, test_labels)) print('Time spend: {:.2f} s'.format(time.time() - temp))
# Feature extraction with sift print('Obtaining sift features...') D, L, _, _ = feature_extraction.parallel_sift(train_images_filenames, train_labels, num_samples_class=30, n_jobs=N_JOBS) print('Time spend: {:.2f} s'.format(time.time() - start)) temp = time.time() # Train Linear SVM classifier print('Training the SVM classifier...') lin_svm, std_scaler, pca = classification.train_rbf_svm( D, L, C=5, gamma=0.1, model_name='final_sift_30_svm_pca23', dim_reduction=23) print('Time spend: {:.2f} s'.format(time.time() - temp)) temp = time.time() # Read the test set test_images_filenames, test_labels = io.load_test_set() print('Loaded {} test images.'.format(len(test_images_filenames))) # Feature extraction with sift, prediction with SVM and aggregation to obtain final class print('Predicting test data...') result = joblib.Parallel(n_jobs=N_JOBS, backend='threading')( joblib.delayed(parallel_testing)(test_image, test_label, lin_svm,