def setUp(self):
    """Build a three-sound fixture and a project-less Analyzer for the tests."""
    settings.INPUT_DIRECTORY = 'test_audio'
    # Same three fixture files as before, constructed via a comprehension.
    self.sounds = [
        sound_file.SoundFile(filename)
        for filename in ('drums.wav', 'noise.wav', 'synth.wav')
    ]
    experiment.Experiment.load_experiment_settings('mfcc_basic.json')
    analyze.Analyzer.init_features_list()
    self.analyzer = analyze.Analyzer(project=None)
def __init__(self, sounds):
    """Analyze every sound in the set, then compute standardization
    statistics over the aggregate and attach standardized series."""
    self.data = {}

    if settings.VERBOSE:
        print('Analyzing all sound files in project...')
    # A project-less analyzer: standardization happens afterwards, once the
    # statistics over the whole set are known.
    analyze.Analyzer(project=None).analyze_multiple(sounds, standardize=False)

    if settings.VERBOSE:
        print('Calculating standardization parameters...')
    std = standardizer.Standardizer(sounds)
    std.calculate_feature_statistics()
    self.data['feature_statistics'] = std.feature_statistics
    std.add_standardized_series()
def make_layer_share_plot():
    """Produce a plot-ready .csv for the layer-sharing analysis.

    Writes one row per sampled share count (1, 10, 20, 30, 40, 50) with the
    share count, the layer ratio (a percentage) and the total size in GB.
    """
    out_file = "./result/layer_share.csv"
    az = analyze.Analyzer()
    lines = []
    # Sample share counts 1, 10, 20, ..., 50; the 0 produced by range() is
    # bumped to 1 because a share count of zero is meaningless here.
    # (Removed an unused `total_count = az.query_layer_count()` call.)
    for share_count in range(0, 51, 10):
        if share_count == 0:
            share_count += 1
        query_result = az.query_layer_top_size(share_count)
        # index 2: layer ratio, already a percentage
        layer_ratio = str(query_result[2])
        # index 3: total size in MB; divide by 1000 to report GB per the legend
        total_size = str(query_result[3] / 1000)
        # compose the line
        line = [str(share_count), layer_ratio, total_size]
        # NOTE(review): the column legend is appended as a 4th field of the
        # FIRST data row rather than emitted as its own header line — kept
        # as-is because the downstream plot script may depend on this layout.
        if not lines:
            line.append("count|percentage|size in gbytes")
        lines.append(",".join(line) + "\n")
    with open(out_file, "w") as f:
        f.writelines(lines)
# Github: https://github.com/fulcrum1378 # All rights reserved. import simple_http_server.server as server import socket import webbrowser import data data.get_personal() data.get_config() import analyze, watch print("SQLite:", data.do_connect() is not None) print("Mofid:", data.init_mofid(), "\n") data.init_analyzer(analyze.Analyzer()) data.init_watcher(watch.Watcher()) my_ip = socket.gethostbyname(socket.gethostname()) webbrowser.open("http://" + str(my_ip) + ":1399/") while True: try: server.scan("", r".*controller.*") server.start(host=my_ip, port=1399) except Exception as e: print("ERROR STARTING SERVER:", e) my_ip = input("PLEASE ENTER YOUR CORRECT IP ADDRESS:") else: break
def _patch_func(step):
    """Kick off an asynchronous layer-size patch starting at the given rank."""
    az = analyze.Analyzer()
    az.patch_layer_size_async(start_rank=step, least_share_count=0)
dest='input_files', nargs=3, type=str,
    help='The filenames of target sound, input sound and output sound respectively',
    required=True
)
arg_parser.add_argument(
    '--experiment-settings',
    dest='experiment_settings',
    type=str,
    help='Filename of json file in the experiment_settings folder. This file specifies which'
         ' features to use as neural input and for similarity calculations.',
    required=False,
    default="mfcc_basic.json"
)
args = arg_parser.parse_args()

# Load the experiment configuration and feature list before any analysis.
experiment.Experiment.load_experiment_settings(args.experiment_settings)
analyze.Analyzer.init_features_list()

# input_files order per the argument help: target, input, output.
target_sound = sound_file.SoundFile(args.input_files[0], is_input=True, verify_file=True)
input_sound = sound_file.SoundFile(args.input_files[1], is_input=True, verify_file=True)
output_sound = sound_file.SoundFile(args.input_files[2], is_input=False, verify_file=True)

# Build a project from target + input, then analyze the produced output sound.
that_project = project.Project([target_sound, input_sound])
analyzer = analyze.Analyzer(that_project)
analyzer.analyze_multiple([output_sound])

# Score how locally similar the output is to the target and print the result.
similarity_score = fitness.LocalSimilarityFitness.get_local_similarity(target_sound, output_sound)
print(similarity_score)
def __init__(self, args):
    """Set up and execute a full evolution run from parsed CLI args.

    Loads the two input sounds, builds the project and analyzer, selects the
    fitness evaluator, prepares per-frame neural input vectors, wires up the
    effect and cross-adapter, initializes stats logging and NEAT state, and
    finally runs the experiment.
    """
    self.args = args

    # Pick a random seed unless one was supplied; only an auto-generated
    # seed is echoed to the console.
    self.seed = random.randint(1, 999999) if self.args.seed == -1 else self.args.seed
    if self.args.seed == -1:
        print('Seed: {}'.format(self.seed))

    if len(self.args.input_files) != 2:
        raise Exception('Two filenames must be specified')
    self.target_sound = sound_file.SoundFile(self.args.input_files[0], is_input=True, verify_file=True)
    self.input_sound = sound_file.SoundFile(self.args.input_files[1], is_input=True, verify_file=True)
    if self.target_sound.num_samples != self.input_sound.num_samples:
        raise Exception('The target sound and the input sound must have the same duration')

    self.project = project.Project([self.target_sound, self.input_sound])
    self.analyzer = analyze.Analyzer(self.project)

    # Select the fitness evaluator; stays None for unrecognized values.
    self.fitness_evaluator = None
    if self.args.fitness == 'similarity':
        self.fitness_evaluator = fitness.LocalSimilarityFitness(self.target_sound)
    elif self.args.fitness == 'multi-objective':
        self.fitness_evaluator = fitness.MultiObjectiveFitness(self.target_sound)
    elif self.args.fitness == 'hybrid':
        self.fitness_evaluator = fitness.HybridFitness(self.target_sound)
    elif self.args.fitness == 'novelty':
        self.fitness_evaluator = fitness.NoveltyFitness(self.target_sound)
    elif self.args.fitness == 'mixed':
        self.fitness_evaluator = fitness.MixedFitness(self.target_sound)

    # Similarity is always tracked, independent of the chosen fitness.
    self.similarity_evaluator = fitness.LocalSimilarityFitness(self.target_sound)

    self.num_frames = min(self.target_sound.get_num_frames(), self.input_sound.get_num_frames())

    # Build one neural input vector per frame, depending on neural mode:
    #   'a'       -> target features + bias
    #   'ab'      -> target + input features + bias
    #   'b'       -> input features + bias
    #   's'       -> bias only, and neuron-adding mutation is disabled
    #   'targets' -> bias only
    self.neural_input_vectors = []
    if self.args.neural_mode == 'a':
        for k in range(self.num_frames):
            vector = self.target_sound.get_standardized_neural_input_vector(k)
            vector.append(1.0)  # bias input
            self.neural_input_vectors.append(vector)
    elif self.args.neural_mode == 'ab':
        for k in range(self.num_frames):
            vector = self.target_sound.get_standardized_neural_input_vector(k)
            vector += self.input_sound.get_standardized_neural_input_vector(k)
            vector.append(1.0)  # bias input
            self.neural_input_vectors.append(vector)
    elif self.args.neural_mode == 'b':
        for k in range(self.num_frames):
            vector = self.input_sound.get_standardized_neural_input_vector(k)
            vector.append(1.0)  # bias input
            self.neural_input_vectors.append(vector)
    elif self.args.neural_mode == 's':
        self.args.add_neuron_probability = 0.0
        for k in range(self.num_frames):
            vector = [1.0]  # bias input
            self.neural_input_vectors.append(vector)
    elif self.args.neural_mode == 'targets':
        for k in range(self.num_frames):
            self.neural_input_vectors.append([1.0])  # just bias

    self.effect = effect.get_effect_instance(self.args.effect_names)

    # 'targets' mode uses a dedicated cross-adapter class.
    self.cross_adapter_class = (cross_adapt.TargetCrossAdapter
                                if self.args.neural_mode == 'targets'
                                else cross_adapt.CrossAdapter)
    self.cross_adapter = self.cross_adapter_class(
        input_sound=self.input_sound,
        neural_input_vectors=self.neural_input_vectors,
        effect=self.effect,
        parameter_lpf_cutoff=experiment.Experiment.PARAMETER_LPF_CUTOFF)

    # Serialized snapshot of the run; used to derive the experiment id and
    # stored as the stats log payload.
    experiment_data = {
        'param_sound': self.target_sound.get_serialized_representation(),
        'input_sound': self.input_sound.get_serialized_representation(),
        'args': vars(self.args),
        'experiment_settings': experiment.Experiment.experiment_settings,
        'generations': [],
        'feature_statistics': self.project.data['feature_statistics'],
        'effect': self.effect.get_serialized_representation()
    }
    # The id is computed before the seed is added — presumably so the same
    # configuration maps to the same experiment regardless of seed; confirm.
    experiment.Experiment.calculate_current_experiment_id(experiment_data)
    experiment_data['seed'] = self.seed

    self.stats_logger = logger.Logger(
        os.path.join(settings.STATS_DATA_DIRECTORY,
                     experiment.Experiment.folder_name,
                     'stats.json'),
        suppress_initialization=True)
    self.stats_logger.data = experiment_data

    self.max_similarity = None
    self.last_fitness_improvement = 0  # generation number
    if self.args.keep_k_best > -1:
        self.best_individual_ids = set()
    self.individual_fitness = {}  # individual id => individual fitness
    self.individual_born = {}  # individual id => generation when it was first found

    self.population = None
    self.init_neat()
    run_start_time = time.time()
    self.run()
    # NOTE(review): this string was line-wrapped in the pasted source; it is
    # assumed to be a single-line literal.
    print("Run execution time: {0:.2f} seconds".format(time.time() - run_start_time))
    self.final_clean_up()
def main():
    """Run a password spray against the selected target module and log results."""
    # Quiet urllib3's warnings for targets with self-signed/invalid TLS certs.
    requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
    users, passwords, host, csvfile, attempts, interval, equal, module, timeout, port, fireprox = args()

    # try to instantiate the specified module
    # (title-cased name resolved to Module.Module within this file's namespace)
    try:
        module = module.title()
        mod_name = getattr(sys.modules[__name__], module)
        class_name = getattr(mod_name, module)
        target = class_name(host, port, timeout, fireprox)
    except AttributeError:
        print(f'[!] Error loading {module} module. {module} is spelled incorrectly or does not exist')
        exit()

    # create the log file
    if not os.path.isdir('logs'):
        os.mkdir('logs')
    log_name = 'logs/%s.log' % host
    logging.basicConfig(filename=log_name, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

    # NOTE(review): presumably a local banner-printing helper that shadows the
    # builtin ascii() (which requires an argument) — confirm it exists.
    ascii()

    # Write the CSV header once, then close; per-attempt rows are written
    # elsewhere (see login()).
    output = open(csvfile, 'w')
    fieldnames = ['Username', 'Password', 'Response Code', 'Response Length']
    output_writer = csv.DictWriter(output, delimiter=',', fieldnames=fieldnames)
    output_writer.writeheader()
    output.close()

    # Echo the run configuration and wait for operator confirmation.
    colors.color_print('[*] Target Module: ', colors.blue, '')
    print(module)
    colors.color_print('[*] Spraying URL: ', colors.blue, '')
    print(target.url)
    if attempts:
        colors.color_print('[*] Interval: ', colors.blue, '')
        print(f'Attempting {attempts} login(s) per user every {interval} minutes')
    colors.color_print('[*] Log of event times: ', colors.blue, '')
    print(log_name)
    colors.color_print('[*] Log of spray results: ', colors.blue, '')
    print(csvfile)
    print('')
    input('Press enter to begin:')
    print('')
    target.print_headers(csvfile)

    login_attempts = 0

    # spray once with password = username if flag present
    if equal:
        for username in users:
            pword = username.split('@')[0]  # local part of the address as the guess
            login(target, username, pword, csvfile)
            # log the login attempt
            logging.info(f'Login attempted as {username}')
            login_attempts += 1

    # spray using password file
    for password in passwords:
        # Throttle between rounds when attempts/interval limits are configured.
        login_attempts = check_sleep(login_attempts, attempts, interval)
        for username in users:
            login(target, username, password, csvfile)
            # log the login attempt
            logging.info(f'Login attempted as {username}')
            login_attempts += 1

    # close files
    # NOTE(review): output was already closed above; this second close is a
    # harmless no-op on an already-closed file object.
    output.close()

    # analyze the results to point out possible hits
    analyzer = analyze.Analyzer(csvfile)
    analyzer.analyze()