def parse_args(args):
    """Parse command-line options for the graphing tool.

    Args:
        args: raw argument strings (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with ``tests`` (one or more names),
        ``duration`` (hours, default 200) and ``smoothing``
        (rolling-average window in hours, default 12).
    """
    cli = ArgumentParser()
    cli.add_argument('tests', nargs='+')
    cli.add_argument(
        '-d', '--duration',
        type=int,
        default=200,
        help='Graph over duration hours (default 200)',
    )
    cli.add_argument(
        '-s', '--smoothing',
        type=int,
        default=12,
        help='Rolling average hours (defaults to 12)',
    )
    return cli.parse_args(args)
def __init__(self):
    """Initializes the extension.

    Sets up the Docker client, subscribes the event listeners, builds the
    flag parser used on query strings, and instantiates the result views.
    """
    super(DockerExtension, self).__init__()
    # Client for the local Docker daemon, configured from the environment.
    self.docker_client = docker.from_env()
    self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
    self.subscribe(ItemEnterEvent, ItemEnterEventListener())
    # Parser for flags embedded in the user's query:
    #   -c -> container_id (value), -a -> all_containers (bool), -i -> info (bool)
    parser = ArgumentParser()
    parser.add_argument('-c', '--c', action='store', dest='container_id')
    parser.add_argument('-a', '--a', action='store_true', default=False, dest='all_containers')
    parser.add_argument('-i', '--i', action='store_true', default=False, dest='info')
    self.arg_parser = parser
    # Views rendering the different result screens; each gets a back-reference
    # to this extension.
    self.list_containers_view = ListContainersView(self)
    self.container_details_view = ContainerDetailsView(self)
    self.info_view = InfoView(self)
    self.utils_view = UtilsView(self)
    # Initialize the desktop-notification library for this extension.
    Notify.init("DockerExtension")
def parse_args():
    """Build the download CLI and parse ``sys.argv``.

    Returns:
        The parsed argparse namespace, including the optional
        ``--no-dataset`` flag that skips the dataset download.
    """
    cli = ArgumentParser(
        description=make_description("download the images"))
    # Shared directory options provided by the project's parser subclass.
    cli.add_images_source_dir_argument()
    cli.add_dataset_dir_argument()
    cli.add_argument(
        "--no-dataset",
        action="store_true",
        help="If given, do not download the dataset (~13GB).",
    )
    return cli.parse_args()
def parse_args(args):
    """Parse command-line options for the word lookup.

    Args:
        args: raw argument strings (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with ``words`` (one or more) and
        ``release`` (defaults to ``'approved'``).
    """
    cli = ArgumentParser()
    cli.add_argument('words', nargs='+')
    cli.add_argument(
        '-r', '--release',
        type=str,
        default='approved',
        help='The name or version of the release',
    )
    return cli.parse_args(args)
def parse_args(args):
    """Parse known options, tolerating unrecognized ones.

    Args:
        args: raw argument strings (e.g. ``sys.argv[1:]``).

    Returns:
        Tuple ``(namespace, remainder)`` from ``parse_known_args``:
        the namespace carries ``msg`` (zero or more words, default '')
        and ``mood`` (default ``'default'``); unknown arguments are
        returned untouched in ``remainder``.
    """
    cli = ArgumentParser()
    cli.add_argument('msg', nargs='*', default='')
    cli.add_argument(
        '-m', '--mood',
        type=str,
        default='default',
        help='PixieBoots mood',
    )
    return cli.parse_known_args(args)
def train_kpca(): THRESHOLD = 0.95 # Proportion of representation when choosing the best eigen vectors args = arguments.get_arguments() # Testing set characteristics TESTING_SET_SIZE = args.testing_set_size # Training set characteristics TRAINING_SET_SIZE = args.training_set_size # Amount of training images for each individual # Load images if (args.verbose): print('Loading images') training_images, training_classes, testing_images, testing_classes, profiles, profile_map = imageHandler.load_images( training_size=TRAINING_SET_SIZE, testing_size=TESTING_SET_SIZE, image_dir=args.images) TRAINING_IMAGES = len(training_images) # Create matrix out of images matrix = ((np.matrix(training_images)) - 127.5) / 127.5 # Calculate K matrix and get eigen values and eigen vectors if (args.verbose): print('Calculating eigen values and eigen vectors') eig_values, eig_vectors, K = calculate_kernel_eigen( matrix=matrix, qr_method=args.qr_method, eig_method=args.eig_method, TRAINING_IMAGES=TRAINING_IMAGES) # Get best eigenvalues if (args.verbose): print('Getting representative eigen values') eigen_faces = get_best_eig_vectors(eig_values, eig_vectors, THRESHOLD) if (args.verbose): print('number of eigen faces used: ', eigen_faces.shape[1]) training_projection = np.dot(K.T, eigen_faces) clf = svm.LinearSVC() clf.fit(training_projection, training_classes) testing_projection = get_testing_data(matrix, eigen_faces, testing_images, K) classifications = clf.score(testing_projection, testing_classes) return clf, eigen_faces, matrix, K, classifications, profiles, profile_map
def train(): THRESHOLD = 0.95 # Proportion of representation when choosing the best eigen vectors args = arguments.get_arguments() # Training set characteristics TRAINING_SET_SIZE = args.training_set_size #Amount of training images for each individual # Testing set characteristics TESTING_SET_SIZE = args.testing_set_size #Amount of testing images for each individual # Load images if (args.verbose): print('Loading images') training_images, training_classes, testing_images, testing_classes, profile, profiles_map = imageHandler.load_images(training_size=TRAINING_SET_SIZE, testing_size=TESTING_SET_SIZE, image_dir=args.images) # Create matrix out of images matrix = (np.matrix(training_images)).T / 255. # Calculate mean different faces mean = matrix.mean(axis=1) imageHandler.save_image(mean, "mean.pgm") # Center matrix centered_matrix = (matrix - mean) # Calculate the covariance matrix # Calculate centered matrix * transposed centered matrix to get a similar matrix to the one of the covariance if(args.verbose): print('Calculating covariance matrix') covariance_matrix = (centered_matrix.T).dot(centered_matrix) # Calculate eigen values and eigen vectors if (args.verbose): print('Calculating eigen values and eigen vectors') if args.type == "pca": eig_values, eig_vectors = calculate_eigen(matrix=covariance_matrix, qr_method=args.qr_method, eig_method=args.eig_method) elif args.type == "kpca": eig_values, eig_vectors = calculate_kernel_eigen(matrix=covariance_matrix, qr_method=args.qr_method, eig_method=args.eig_method) else: raise ValueError("The type is not supported") # Get best eigenvalues if(args.verbose): print('Getting representative eigen values') best_eig_vectors = get_best_eig_vectors(eig_values, eig_vectors, THRESHOLD) # Calculate images # http://blog.manfredas.com/eigenfaces-tutorial/ eigen_faces = centered_matrix.dot(best_eig_vectors) # Normalize eigen faces optimization row_sums = np.linalg.norm(eigen_faces, axis=0) eigen_faces = 
np.divide(eigen_faces,row(row_sums)) # Project values on eigen vectors if(args.verbose): print('Projecting values on eigen vectors') projected_values = eigen_faces.T.dot(centered_matrix) # Write image files imageHandler.save_images(images=eigen_faces.T) # Generate matrices from loaded images test_matrix = np.matrix(testing_images).T/255. test_matrix = test_matrix - mean testing_set = eigen_faces.T.dot(test_matrix) #Test images clf = svm.LinearSVC() # Training classifier with provided data set+group clf.fit(projected_values.T, training_classes) classifications = clf.score(testing_set.T, testing_classes) return mean, eigen_faces, clf, classifications, profile, profiles_map
# NOTE(review): Python 2 script — Tkinter/tkFileDialog imports and
# statement-form print; do not run under Python 3 without porting.
import tkFileDialog
from Tkinter import Tk
from PIL import Image
from pca import pca
from utils import ArgumentParser as arguments
from kpca import kpca
from utils.ProfileHandler import Profile

args = arguments.get_arguments()

if args.type == "kpca":
    print("doing kpca")
    # Train the KPCA classifier and report its accuracy on the testing set.
    clf, best_eig_vectors, matrix, K, success_rate, profiles, profile_map = kpca.train_kpca()
    print "\n----------------------------------------"
    print "Correct Classifications", success_rate * 100, "%"
    print "----------------------------------------"
    # while True:
    #     test_image = list(None for i in range(1))
    #     dir = "../att_faces/s3/8.pgm"
    #     test_image[0] = list(Image.open(dir).getdata())
    #     classification = kpca.test_kpca(clf, best_eig_vectors, matrix, K, test_image)
    #     print("\n----------------")
    #     print("classification: ")
    #     print(classification)
    #     print("----------------")
import argparse
import numpy as np
import tensorflow as tf

from queue import Queue

from core.Classifier import *
from core.efficientnet.utils import *
from utils.Utils import *
from utils.Teacher import *
from utils.ArgumentParser import *
from utils.Tensorflow_Utils import *

# Parse experiment configuration (project ArgumentParser returns a dict-like
# object — presumably a dict; verify against utils.ArgumentParser).
args = ArgumentParser().parse_args()
args['warmup_iteration'] = int(args['max_iteration'] * 0.05)  # warmup iteration = 5%

# Run identifier: experimenter, date and EfficientNet variant (e.g. "b0").
model_name = '{}-{}-EfficientNet-{}'.format(args['experimenter'], get_today(), args['option'])

# Without multi-scale training, pin the maximum image size to the chosen
# EfficientNet variant's native resolution.
if not args['multi_scale']:
    width_coeff, depth_coeff, resolution, dropout_rate = efficientnet.efficientnet_params(
        'efficientnet-{}'.format(args['option']))
    args['max_image_size'] = resolution

# Restrict visible GPUs and scale the global batch size by GPU count.
num_gpu = len(args['use_gpu'].split(','))
os.environ["CUDA_VISIBLE_DEVICES"] = args['use_gpu']
args['batch_size'] = args['batch_size_per_gpu'] * num_gpu
def make_parser():
    """Assemble the argument parser for the training CLI.

    Returns:
        The project ArgumentParser, configured with the positional
        ``style`` images plus the shared directory/device options.
    """
    cli = ArgumentParser(description=make_description("training"))
    style_help = (
        "Style images for which the training is performed successively. If "
        "relative path, the image is searched in IMAGES_SOURCE_DIR. Can also be a "
        "valid key from the built-in images. Defaults to all built-in style images."
    )
    cli.add_argument("style", type=str, nargs="*", help=style_help)
    # Shared options contributed by the project's parser subclass.
    cli.add_images_source_dir_argument()
    cli.add_models_dir_argument()
    cli.add_dataset_dir_argument()
    cli.add_impl_params_and_instance_norm_arguments()
    cli.add_device_argument()
    return cli