def test_pieces_integration(piece, tempo, recording):
    """
    Sample audio frames from a recording and run them through the model as an integration test.

    :param piece: Pieces object
    :param tempo: int, beats per minute
    :param recording: str, path to the recording
    :return: None
    """
    model = Model(None, piece=piece, tempo=tempo)
    t = 0
    q = np.load(recording)[:, :]
    states = [0] * model.score.N
    while t < len(q[0]):
        obs = q[:, t]
        current_state, prob = model.next_observation(obs)
        t += 1
        # Rescale alpha when probabilities get tiny to avoid numerical underflow
        if prob < 1.0e-110:
            model.alpha *= 1.0e100
        states[current_state[0]] += 1
    res = states[1:len(states) - 1]
    desired_note_length = (model.recording_speed * model.score.sub_beat.value) / tempo
    average_note_length = sum(res) / len(res)
    # Check no notes were skipped
    assert all(count > 0 for count in res)
    # Check that the average note length was within the acceptable range
    assert abs(average_note_length - desired_note_length) < LENGTH_THRESHOLD
def create_intelligent_random_trainer_agent(config_path, update_dict, env):
    random_trainer_config = load_config(
        key_list=IntelligentRandomTrainerAgent.key_list,
        config_path=config_path,
        update_dict=update_dict)
    trainer_agent = IntelligentRandomTrainerAgent(
        config=random_trainer_config,
        model=Model(config=None),
        env=env)
    return trainer_agent
def __init__(self):
    self.updater = Updater(
        "1056944645:AAELDA_hclG4RV402WNw89IE9TLt25F_OIM")
    self.dp = self.updater.dispatcher
    self.add_handlers()
    self.wapi = WeatherAPI()
    self.model = Model()
    self.wardrobe = Wardrobe()
    self.mapper = ClothesMapper()
def main():
    presenter = Presenter()
    model = Model()
    presenter.set_model(model)
    # Use the GUI view unless command-line arguments were supplied
    if len(sys.argv) <= 1:
        view = ViewGUI(presenter)
    else:
        view = ViewCLI(presenter)
    presenter.set_view(view)
    presenter.run()
def main(args, defaults):
    parameters = process_args(args, defaults)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(parameters.gpu_id)

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',
        filename=parameters.log_path)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        model = Model(
            phase=parameters.phase,
            gpu_id=parameters.gpu_id,
            channel=parameters.channel,
            mean=parameters.mean,
            visualize=parameters.visualize,
            use_gru=parameters.use_gru,
            load_model=parameters.load_model,
            data_dir=parameters.data_dir,
            label_path=parameters.label_path,
            lexicon_file=parameters.lexicon_file,
            model_dir=parameters.model_dir,
            output_dir=parameters.output_dir,
            steps_per_checkpoint=parameters.steps_per_checkpoint,
            num_epoch=parameters.num_epoch,
            batch_size=parameters.batch_size,
            initial_learning_rate=parameters.initial_learning_rate,
            clip_gradients=parameters.clip_gradients,
            max_gradient_norm=parameters.max_gradient_norm,
            target_embedding_size=parameters.target_embedding_size,
            attn_num_hidden=parameters.attn_num_hidden,
            attn_num_layers=parameters.attn_num_layers,
            valid_target_length=float('inf'),
            session=sess)
        print('model init end, launch start...')
        model.launch()
def __init__(self,
             with_headset: bool,
             piece: Pieces = None,
             bpm: int = 60,
             local_ip: str = None,
             port: int = None):
    self.with_headset = with_headset
    try:
        if self.with_headset:
            assert local_ip is not None and port is not None
            # Connect to Websocket Server
            self.headset_client = HeadsetClient(local_ip, port)
            self.output_q = queue.Queue()
            logging.info("Waiting for Song Selection...")
            song = MessageBuilder.parse_message(
                self.headset_client.receive())
            # Keep polling until a valid song choice arrives
            while type(song) != Pieces:
                logging.info(
                    "Invalid Song Choice, Waiting for Song Selection...")
                time.sleep(0.05)
                song = MessageBuilder.parse_message(
                    self.headset_client.receive())
            piece, bpm = MessageBuilder.parse_message(
                self.headset_client.receive())
            logging.info(f"Song Selected: {song}, Tempo {bpm}")
        else:
            assert piece is not None and bpm is not None
    except AssertionError as e:
        logging.error("Invalid Parameters")
        raise Exception(e.args)
    except Exception as e:
        logging.error("An Error Occurred")
        raise Exception(e.args)

    self.audio_client = AudioClient()
    self.model = Model(self.audio_client, piece=piece, tempo=bpm)
    self.accompaniment = AccompanimentService(self.model.score)
    self.tempo = KalmanFilter(self.model.score.tempo)
    self.math_helper = MathHelper()
    self.prev_state = None
    self.prev_note_val = None
    self.duration = 1
import networkx as nx
import numpy as np

from src.logger import log
from src.model.model import Model

seed = 1234
np.random.seed(seed)

# graph
num_clusters = 50
gamma = 2.5

# model
dim = 50
scale = 10000

for approx_avg_degree in range(10, 51, 10):
    for approx_num_nodes in range(500, 5001, 500):
        g, actual_comm = sbm(
            preferential_attachment_cluster(num_clusters, gamma),
            approx_num_nodes, approx_avg_degree)
        log.write_log(
            f"generated graph: size {g.number_of_nodes()}, "
            f"cluster size {len(actual_comm)} "
            f"average degree: {2 * g.number_of_edges() / g.number_of_nodes()} "
            f"max modularity: {nx.algorithms.community.quality.modularity(g, actual_comm)}"
        )
        draw_size([len(c) for c in actual_comm], name="actual_size", log=True)
        embedding = Model(seed, g.number_of_nodes(), dim).deepwalk_embedding(g)
        log.write_log(f"scale {scale}")
        comm, kmeans_improved_comm, kmeans_comm = Model(
            seed, g.number_of_nodes(), dim).ddcrp_iterate(
                g, embedding, ddcrp_scale=scale)
        log.write_log(f"cluster size {len(kmeans_improved_comm)}")
        from_timestamp, to_timestamp, average_degree, predicted_cluster_size,
        modularity, performance, improved_modularity, improved_performance,
        naive_modularity, naive_performance, ddcrp_time, response
    ],
    name=log_filename(hop, window, scale),
)

hop = 1
window = 10
scale = 3000
write_first_line(hop, window, scale)
start = 0
model = Model(seed, mg.number_of_nodes(), dim)
end_loop = False
comm: List[Set[int]] = []
while True:
    if end_loop:
        break
    end = start + window * fold_size
    if end >= len(edge_list):
        end = len(edge_list) - 1
        end_loop = True
    #####
    from_timestamp = timestamp(edge_list[start])
    to_timestamp = timestamp(edge_list[end])
    g = subgraph_by_timestamp(
        mg,
        from_timestamp,
def __init__(self):
    self._model = Model()
    self._view = View()
    self._controller = Controller()
def evaluateOnClick(self):
    q_model = self.algorithms_view.model()
    labels = self.getSelectedItemsLabels(q_model)
    if len(labels) > 0:
        models = []
        for label in labels:
            if label == 'Lasso':
                models.append(
                    LogisticRegressionCV(penalty='l2',
                                         solver='newton-cg',
                                         multi_class='multinomial'))
            elif label == 'Ridge':
                models.append(
                    LogisticRegressionCV(penalty='l1', solver='liblinear'))
            elif label == 'RandomForest':
                models.append(RandomForestClassifier(n_estimators=1000))
            elif label == 'RFECV_SVM':
                models.append(
                    RFECV(estimator=SVC(gamma="scale", kernel="linear"),
                          verbose=1))
        model = Model(models)
    else:
        plain_text = self.textbox.toPlainText()
        json_components = json.loads(plain_text)
        model = Model(json_components)
        model = model.from_json(json_components)

    data = self.getDataFromFile(self.label1.text())
    training_size = int(0.7 * len(data[0]))
    model, validation = self.getTrainedAndValidatedModelWithValidation(
        model, data, training_size)
    feature_ranking = model.feature_ranking()
    voting_results = model.perform_voting()

    QMessageBox.question(
        self, "Genomics Studies - Summary",
        "\n Voting results: \n" + "\n".join([
            "Feature " + self.getPretty(i) + " : " + str(v)
            for (i, v) in enumerate(voting_results)
        ]), QMessageBox.Ok, QMessageBox.Ok)

    sorted_voting = [(i, v) for (v, i) in reversed(
        sorted([(voting_results[i], i) for i in range(len(voting_results))]))]
    QMessageBox.question(
        self, "Genomics Studies - Summary",
        "\n Features sorted by votes: \n" + "\n".join([
            "Feature " + self.getPretty(i) + " : " + str(v)
            for (i, v) in sorted_voting
        ]), QMessageBox.Ok, QMessageBox.Ok)

    self.writeResultToFile(
        [str(v[0]) + "; " + str(v[1]) for v in sorted_voting],
        suggested_name="voting_results.csv",
        first_row="Feature number; Voting result",
        command="Choose output file for voting results")

    validation_output = []
    for (comp, val_dict) in validation:
        validation_output.append(str(comp))
        for key in val_dict:
            validation_output.append(" " + str(key) + ":\n " +
                                     str(val_dict[key]))
        validation_output.append("\n\n")
    self.writeResultToFile(
        validation_output,
        suggested_name="validation_results.txt",
        command="Choose output file for validation results")
    self.writeResultToFile(
        feature_ranking,
        suggested_name="feature_ranking.txt",
        command="Choose output file for feature ranking")

    results_chart_window = ResultsChartWindow(self, model)
import sys

sys.path.append('../../')

from src.music.note import Pitch
from src.scripts.follow import RecordThread
import numpy as np
from scipy.stats import multivariate_normal
from src.interface.audio import AudioClient
from src.model.model import Model

if __name__ == "__main__":
    audio_client = AudioClient()
    model = Model(audio_client)
    live = True
    if live:
        record_thread = RecordThread(audio_client)
        record_thread.start()
    i = 0
    while True:
        pitch = -1
        pitch_sp = -1
        prob = 0
        prob_sp = 0
        obs = audio_client.q.get().squeeze()
        for k in range(-1, 12):
def test():
    defaults = exp_config.ExpConfig

    parameters = dict()
    parameters['log_path'] = 'log.txt'
    parameters['phase'] = 'train'
    parameters['visualize'] = defaults.VISUALIZE
    parameters['data_path'] = 'train.txt'
    parameters['data_root_dir'] = '../data/date'
    parameters['lexicon_file'] = 'lexicon.txt'
    parameters['output_dir'] = defaults.OUTPUT_DIR
    parameters['batch_size'] = 4
    parameters['initial_learning_rate'] = 1.0
    parameters['num_epoch'] = 30
    parameters['steps_per_checkpoint'] = 200
    parameters['target_vocab_size'] = defaults.TARGET_VOCAB_SIZE
    parameters['model_dir'] = '../output'
    parameters['target_embedding_size'] = 10
    parameters['attn_num_hidden'] = defaults.ATTN_NUM_HIDDEN
    parameters['attn_num_layers'] = defaults.ATTN_NUM_LAYERS
    parameters['clip_gradients'] = defaults.CLIP_GRADIENTS
    parameters['max_gradient_norm'] = defaults.MAX_GRADIENT_NORM
    parameters['load_model'] = defaults.LOAD_MODEL
    parameters['gpu_id'] = defaults.GPU_ID
    parameters['use_gru'] = False

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',
        filename=parameters['log_path'])
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        model = Model(
            phase=parameters['phase'],
            visualize=parameters['visualize'],
            data_path=parameters['data_path'],
            data_root_dir=parameters['data_root_dir'],
            output_dir=parameters['output_dir'],
            batch_size=parameters['batch_size'],
            initial_learning_rate=parameters['initial_learning_rate'],
            num_epoch=parameters['num_epoch'],
            steps_per_checkpoint=parameters['steps_per_checkpoint'],
            target_vocab_size=parameters['target_vocab_size'],
            model_dir=parameters['model_dir'],
            target_embedding_size=parameters['target_embedding_size'],
            attn_num_hidden=parameters['attn_num_hidden'],
            attn_num_layers=parameters['attn_num_layers'],
            clip_gradients=parameters['clip_gradients'],
            max_gradient_norm=parameters['max_gradient_norm'],
            load_model=parameters['load_model'],
            valid_target_length=float('inf'),
            gpu_id=parameters['gpu_id'],
            use_gru=parameters['use_gru'],
            session=sess)
        model.launch()
])

for approx_avg_degree in range(10, 51, 10):
    for approx_num_nodes in range(500, 2001, 500):
        g, actual_comm = sbm(
            preferential_attachment_cluster(num_clusters, gamma),
            approx_num_nodes, approx_avg_degree)
        graph_size = g.number_of_nodes()
        average_degree = 2 * g.number_of_edges() / g.number_of_nodes()
        cluster_size = len(actual_comm)
        max_modularity = nx.algorithms.community.quality.modularity(
            g, actual_comm)
        max_performance = nx.algorithms.community.quality.performance(
            g, actual_comm)
        embedding = Model(seed, g.number_of_nodes(),
                          dim).deepwalk(g, deepwalk_epochs)
        for scale in range(1000, 30000, 1000):
            t0 = time.time()
            comm_list = Model(seed, g.number_of_nodes(),
                              dim).ddcrp(g, embedding,
                                         ddcrp_scale=scale,
                                         ddcrp_iterations=ddcrp_iterations)
            ddcrp_time = time.time() - t0
            comm_list = comm_list[ddcrp_cutoff:]
            comm, _ = Model.mcla(comm_list)
            predicted_cluster_size = len(comm)
            modularity = nx.algorithms.community.quality.modularity(g, comm)
            performance = nx.algorithms.community.quality.performance(g, comm)
            improved_comm = Model.kmeans(embedding, comm)
            improved_modularity = nx.algorithms.community.quality.modularity(