def test_pieces_integration(piece, tempo, recording):
    """
    Sample audio from recording and put into integration.

    Runs the forward algorithm over a pre-recorded observation matrix and
    checks that every interior score state was visited and that the average
    note length matches the expected tempo.

    :param piece: pieces object
    :param tempo: int beats per minute
    :param recording: str path to recording (.npy observation matrix)
    :return: None
    """
    model = Model(None, piece=piece, tempo=tempo)
    observations = np.load(recording)[:, :]
    state_counts = [0] * model.score.N

    # Feed each observation frame through the model in order.
    for frame in range(len(observations[0])):
        current_state, prob = model.next_observation(observations[:, frame])
        # Rescale alpha when probabilities get tiny to avoid underflow.
        if prob < 1.0e-110:
            model.alpha *= 1.0e100
        state_counts[current_state[0]] += 1

    # Drop the artificial first/last states; only interior notes count.
    interior = state_counts[1:-1]
    desired_note_length = (model.recording_speed *
                           model.score.sub_beat.value) / tempo
    average_note_length = sum(interior) / len(interior)

    # Check no notes were skipped
    assert all(count > 0 for count in interior)
    # Check that average note length was within acceptable range
    assert abs(average_note_length - desired_note_length) < LENGTH_THRESHOLD
def __init__(self, options, corpus, session):
    """Initialize via the Model base, then build and train immediately.

    :param options: run configuration passed through to Model
    :param corpus: training corpus passed through to Model
    :param session: TF session passed through to Model
    """
    Model.__init__(self, options, corpus, session)
    # Per-item popularity accumulator (filled during training).
    self._pop_per_item = []
    self.train()
    self.build_eval_graph()
def __init__(self, options, corpus, session):
    """Set up all parameter slots, build the train/eval graphs, initialize
    TF variables, and optionally restore a checkpoint and open summary writers.

    :param options: run configuration passed through to Model
    :param corpus: training corpus passed through to Model
    :param session: TF session passed through to Model
    """
    Model.__init__(self, options, corpus, session)
    self.last_save_epoch = -1

    # Model parameters (current values).
    self._all_user_emb = self._user_emb = None
    self._item_emb = self._item_bias = None
    # Best-so-far snapshots of the same parameters.
    self._all_user_emb_best = self._user_emb_best = None
    self._item_emb_best = self._item_bias_best = None

    # Graph nodes used to run/feed/fetch.
    self._cur_user = self._prev_item = None
    self._pos_item = self._neg_item = None

    # Key training nodes.
    self._loss = self._loss1 = self._loss2 = None
    self._train = None
    self.global_step = None
    self._debug = None

    # Maintenance ops.
    self._norm_all_user_emb_op = None
    self._norm_item_emb_op = None
    self._save_best_param_op = None
    self._restore_best_param_op = None

    # Summary plumbing.
    self._loss_summary = None
    self._train_writer = None

    # Graph construction must precede variable initialization below.
    self.build_graph()
    self.build_eval_graph()

    # Properly initialize all variables.
    tf.global_variables_initializer().run()
    self.saver = tf.train.Saver()

    if len(self._options.checkpoint_name) > 0:
        ckpt_path = (self._options.checkpoints_path +
                     self._options.checkpoint_name)
        self.load_model(ckpt_path)

    if self._options.write_summary:
        run_tag = "{}{}_{}/train".format(self._options.save_path,
                                         self._options.timestamp,
                                         self.to_string())
        self._train_writer = tf.summary.FileWriter(run_tag,
                                                   self._session.graph)
        self._test_writer = tf.summary.FileWriter("{}{}_{}/test".format(
            self._options.save_path, self._options.timestamp,
            self.to_string()))
def __init__(self):
    """Wire up the Telegram bot: updater, handlers, weather API, clothing
    model, wardrobe storage, and the weather->clothes mapper."""
    # SECURITY NOTE(review): the bot token is hardcoded in source; it should
    # be loaded from an environment variable/config and this token revoked.
    self.updater = Updater(
        "1056944645:AAELDA_hclG4RV402WNw89IE9TLt25F_OIM")
    self.dp = self.updater.dispatcher
    # Register message/command handlers before anything else uses dp.
    self.add_handlers()
    self.wapi = WeatherAPI()
    self.model = Model()
    self.wardrobe = Wardrobe()
    self.mapper = ClothesMapper()
def getTrainedAndValidatedModelWithValidation(
        self, model: Model, data, training_size,
        validator: Validator = None):
    """Split data at ``training_size``, fit the model on the head, and
    validate it on the tail.

    :param model: the model to fit and validate
    :param data: tuple (X, y) of features and labels
    :param training_size: number of leading rows used for training
    :param validator: validation strategy; a fresh Validator is created per
        call when omitted
    :return: tuple (fitted model, validation results)
    """
    # BUG FIX: the default was ``validator: Validator = Validator()``, which
    # is evaluated once at function definition time — every call without an
    # explicit validator shared the same instance (and any state it holds).
    if validator is None:
        validator = Validator()
    X, y = data
    # Sequential split (no shuffling): head trains, tail tests.
    X_tr = X[:training_size, :]
    y_tr = y[:training_size]
    X_test = X[training_size:, :]
    y_test = y[training_size:]
    model.fit(X_tr, y_tr)
    model.set_validation(validator)
    validation = model.validateWithComponentNames(X_test, y_test)
    return model, validation
def main(args, defaults):
    """Parse CLI arguments, configure logging (file at DEBUG, console at
    INFO), then build the OCR model inside a TF session and launch it.

    :param args: raw command-line arguments
    :param defaults: default configuration values
    """
    parameters = process_args(args, defaults)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(parameters.gpu_id)

    # One shared format: file handler via basicConfig, console mirror at INFO.
    log_format = '%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s'
    logging.basicConfig(level=logging.DEBUG,
                        format=log_format,
                        filename=parameters.log_path)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(log_format))
    logging.getLogger('').addHandler(console)

    gpu_options = tf.GPUOptions(allow_growth=True)
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    gpu_options=gpu_options)
    with tf.Session(config=session_config) as sess:
        model_kwargs = dict(
            phase=parameters.phase,
            gpu_id=parameters.gpu_id,
            channel=parameters.channel,
            mean=parameters.mean,
            visualize=parameters.visualize,
            use_gru=parameters.use_gru,
            load_model=parameters.load_model,
            data_dir=parameters.data_dir,
            label_path=parameters.label_path,
            lexicon_file=parameters.lexicon_file,
            model_dir=parameters.model_dir,
            output_dir=parameters.output_dir,
            steps_per_checkpoint=parameters.steps_per_checkpoint,
            num_epoch=parameters.num_epoch,
            batch_size=parameters.batch_size,
            initial_learning_rate=parameters.initial_learning_rate,
            clip_gradients=parameters.clip_gradients,
            max_gradient_norm=parameters.max_gradient_norm,
            target_embedding_size=parameters.target_embedding_size,
            attn_num_hidden=parameters.attn_num_hidden,
            attn_num_layers=parameters.attn_num_layers,
            valid_target_length=float('inf'),
            session=sess)
        model = Model(**model_kwargs)
        print('model init end, launch start...')
        model.launch()
def create_intelligent_random_trainer_agent(config_path, update_dict, env):
    """Load the trainer-agent config and build an IntelligentRandomTrainerAgent.

    :param config_path: path to the configuration file
    :param update_dict: overrides applied on top of the file config
    :param env: environment the agent will act in
    :return: a configured IntelligentRandomTrainerAgent
    """
    config = load_config(key_list=IntelligentRandomTrainerAgent.key_list,
                         config_path=config_path,
                         update_dict=update_dict)
    # NOTE: the agent's model is intentionally built with config=None.
    return IntelligentRandomTrainerAgent(config=config,
                                         model=Model(config=None),
                                         env=env)
def __init__(self, with_headset: bool, piece: Pieces = None, bpm: int = 60,
             local_ip: str = None, port: int = None):
    """Set up the score follower: optionally negotiate the piece/tempo with
    a headset over a websocket, then build the audio client, HMM model,
    accompaniment service, and tempo Kalman filter.

    :param with_headset: when True, piece/bpm are received from the headset
        and local_ip/port are required; otherwise piece/bpm must be given
    :param piece: pieces object (required when with_headset is False)
    :param bpm: beats per minute (default 60)
    :param local_ip: websocket server IP (required when with_headset is True)
    :param port: websocket server port (required when with_headset is True)
    """
    self.with_headset = with_headset
    try:
        if self.with_headset:
            assert local_ip is not None and port is not None
            # Connect to Websocket Server
            self.headset_client = HeadsetClient(local_ip, port)
            self.output_q = queue.Queue()
            logging.info(f"Waiting for Song Selection...")
            song = MessageBuilder.parse_message(
                self.headset_client.receive())
            # NOTE(review): `song` is never reassigned inside this loop, so
            # an invalid first message appears to spin forever — confirm
            # whether a receive()/parse should happen in the body.
            while type(song) != Pieces:
                logging.info(
                    "Invalid Song Choice, Waiting for Song Selection...")
                time.sleep(0.05)
            # Second message carries the (piece, tempo) pair.
            piece, bpm = MessageBuilder.parse_message(
                self.headset_client.receive())
            logging.info(f"Song Selected: {song}, Tempo {bpm}")
        else:
            assert piece is not None and bpm is not None
    except AssertionError as e:
        logging.error("Invalid Parameters")
        raise Exception(e.args)
    except Exception as e:
        logging.error("An Error Occurred")
        raise Exception(e.args)
    self.audio_client = AudioClient()
    self.model = Model(self.audio_client, piece=piece, tempo=bpm)
    self.accompaniment = AccompanimentService(self.model.score)
    # Kalman filter tracks deviations from the score's nominal tempo.
    self.tempo = KalmanFilter(self.model.score.tempo)
    self.math_helper = MathHelper()
    # Follower state: previous HMM state/note and frames spent on the note.
    self.prev_state = None
    self.prev_note_val = None
    self.duration = 1
def main():
    """Assemble the MVP triad and run: GUI view by default, CLI view when
    any command-line argument is supplied."""
    presenter = Presenter()
    presenter.set_model(Model())
    use_cli = len(sys.argv) > 1
    view = ViewCLI(presenter) if use_cli else ViewGUI(presenter)
    presenter.set_view(view)
    presenter.run()
from src.logger import log
from src.model.model import Model

# Fixed seed so graph generation and embeddings are reproducible.
seed = 1234
np.random.seed(seed)
# graph generation parameters
num_clusters = 50
gamma = 2.5  # power-law exponent for cluster sizes
# model parameters
dim = 50  # embedding dimension
scale = 10000  # ddcrp scale parameter
# Sweep over average degree and graph size; for each setting generate an
# SBM graph, embed it with DeepWalk, and cluster via DDCRP iteration.
for approx_avg_degree in range(10, 51, 10):
    for approx_num_nodes in range(500, 5001, 500):
        g, actual_comm = sbm(
            preferential_attachment_cluster(num_clusters, gamma),
            approx_num_nodes, approx_avg_degree)
        log.write_log(
            f"generated graph: size {g.number_of_nodes()}, cluster size {len(actual_comm)} average degree: {2 * g.number_of_edges() / g.number_of_nodes()} max modularity: {nx.algorithms.community.quality.modularity(g, actual_comm)}"
        )
        draw_size([len(c) for c in actual_comm], name="actual_size", log=True)
        # DeepWalk embedding of the generated graph.
        embedding = Model(seed, g.number_of_nodes(),
                          dim).deepwalk_embedding(g)
        log.write_log(f"scale {scale}")
        # DDCRP clustering on the embedding; a fresh Model is built here —
        # presumably only its configuration matters, not shared state.
        comm, kmeans_improved_comm, kmeans_comm = Model(
            seed, g.number_of_nodes(),
            dim).ddcrp_iterate(g, embedding, ddcrp_scale=scale)
        log.write_log(f"cluster size {len(kmeans_improved_comm)}")
def evaluateOnClick(self):
    """Handler for the Evaluate button: build a feature-selection model from
    the checked algorithms (or from pasted JSON), train/validate it on the
    chosen data file, then show and export voting results, validation
    output, and the feature ranking."""
    q_model = self.algorithms_view.model()
    labels = self.getSelectedItemsLabels(q_model)
    if len(labels) > 0:
        # Build one estimator per selected algorithm label.
        models = []
        for label in labels:
            # NOTE(review): 'Lasso' maps to penalty='l2' and 'Ridge' to
            # penalty='l1' — conventionally Lasso is L1 and Ridge is L2,
            # so these look swapped; confirm before relying on the names.
            if label == 'Lasso':
                models.append(
                    LogisticRegressionCV(penalty='l2',
                                         solver='newton-cg',
                                         multi_class='multinomial'))
            elif label == 'Ridge':
                models.append(
                    LogisticRegressionCV(penalty='l1', solver='liblinear'))
            elif label == 'RandomForest':
                models.append(RandomForestClassifier(n_estimators=1000))
            elif label == 'RFECV_SVM':
                models.append(
                    RFECV(estimator=SVC(gamma="scale", kernel="linear"),
                          verbose=1))
        model = Model(models)
    else:
        # No algorithms checked: fall back to a JSON spec from the textbox.
        plain_text = self.textbox.toPlainText()
        json_components = json.loads(plain_text)
        model = Model(json_components)
        model = model.from_json(json_components)
    # 70/30 sequential train/test split on the loaded data.
    data = self.getDataFromFile(self.label1.text())
    training_size = int(0.7 * len(data[0]))
    model, validation = self.getTrainedAndValidatedModelWithValidation(
        model, data, training_size)
    feature_ranking = model.feature_ranking()
    voting_results = model.perform_voting()
    # Summary dialog: raw per-feature vote counts.
    QMessageBox.question(
        self, "Genomics Studies - Summary",
        "\n Voting results: \n" + "\n".join([
            "Feature " + self.getPretty(i) + " : " + str(v)
            for (i, v) in enumerate(voting_results)
        ]), QMessageBox.Ok, QMessageBox.Ok)
    # Sort (vote, index) pairs descending by votes, then swap back to
    # (index, vote) for display/export.
    sorted_voting = [(i, v) for (v, i) in reversed(
        sorted([(voting_results[i], i)
                for i in range(len(voting_results))]))]
    QMessageBox.question(
        self, "Genomics Studies - Summary",
        "\n Features sorted by votes: \n" + "\n".join([
            "Feature " + self.getPretty(i) + " : " + str(v)
            for (i, v) in sorted_voting
        ]), QMessageBox.Ok, QMessageBox.Ok)
    self.writeResultToFile(
        [str(v[0]) + "; " + str(v[1]) for v in sorted_voting],
        suggested_name="voting_results.csv",
        first_row="Feature number; Voting result",
        command="Choose output file for voting results")
    # Flatten the per-component validation dicts into printable lines.
    validation_output = []
    for (comp, val_dict) in validation:
        validation_output.append(str(comp))
        for key in val_dict:
            validation_output.append(" " + str(key) + ":\n " +
                                     str(val_dict[key]))
        validation_output.append("\n\n")
    self.writeResultToFile(
        validation_output,
        suggested_name="validation_results.txt",
        command="Choose output file for validation results")
    self.writeResultToFile(
        feature_ranking,
        suggested_name="feature_ranking.txt",
        command="Choose output file for feature ranking")
    results_chart_window = ResultsChartWindow(self, model)
from_timestamp, to_timestamp, average_degree, predicted_cluster_size, modularity, performance, improved_modularity, improved_performance, naive_modularity, naive_performance, ddcrp_time, response ], name=log_filename(hop, window, scale), ) hop = 1 window = 10 scale = 3000 write_first_line(hop, window, scale) start = 0 model = Model(seed, mg.number_of_nodes(), dim) end_loop = False comm: List[Set[int]] = [] while True: if end_loop: break end = start + window * fold_size if end >= len(edge_list): end = len(edge_list) - 1 end_loop = True ##### from_timestamp = timestamp(edge_list[start]) to_timestamp = timestamp(edge_list[end]) g = subgraph_by_timestamp( mg, from_timestamp,
def __init__(self, model: Model, config: dict) -> None:
    """
    Creates a new TrainManager for a model, specified as in configuration.

    :param model: torch module defining the model
    :param config: dictionary containing the training configurations
    :raises ConfigurationError: on invalid normalization, eval metric,
        early-stopping metric, or segmentation level settings
    """
    train_config = config["training"]

    # files for logging and storing
    self.model_dir = make_model_dir(train_config["model_dir"],
                                    overwrite=train_config.get(
                                        "overwrite", False))
    self.logger = make_logger("{}/train.log".format(self.model_dir))
    self.logging_freq = train_config.get("logging_freq", 100)
    self.valid_report_file = "{}/validations.txt".format(self.model_dir)
    self.tb_writer = SummaryWriter(log_dir=self.model_dir + "/tensorboard/")

    # model
    self.model = model
    self.pad_index = self.model.pad_index
    self.bos_index = self.model.bos_index
    self._log_parameters_list()

    # objective
    self.label_smoothing = train_config.get("label_smoothing", 0.0)
    self.loss = XentLoss(pad_index=self.pad_index,
                         smoothing=self.label_smoothing)
    self.normalization = train_config.get("normalization", "batch")
    if self.normalization not in ['batch', 'tokens', 'none']:
        # BUG FIX: original concatenation lacked a separating space.
        raise ConfigurationError("Invalid normalization option. "
                                 "Valid options: "
                                 "'batch', 'tokens', 'none'.")

    # optimization
    self.learning_rate_min = train_config.get("learning_rate_min", 1.0e-8)
    self.clip_grad_fun = build_gradient_clipper(config=train_config)
    self.optimizer = build_optimizer(config=train_config,
                                     parameters=model.parameters())

    # validation & early stopping
    self.validation_freq = train_config.get("validation_freq", 1000)
    # BUG FIX: the config key was misspelled "priingvalid_sents"; the
    # conventional key is "print_valid_sents".
    self.log_valid_sents = train_config.get("print_valid_sents", [0, 1, 2])
    self.ckpt_queue = queue.Queue(
        maxsize=train_config.get("keep_last_ckpts", 5))
    self.eval_metric = train_config.get('eval_metric', 'bleu')
    if self.eval_metric not in ['bleu', 'chrf', 'token_accuracy',
                                'sequence_accuracy']:
        raise ConfigurationError("Invalid setting for 'eval_metric', "
                                 "valid options: 'bleu', 'chrf', "
                                 "'token_accuracy', 'sequence_accuracy'.")
    self.early_stopping_metric = train_config.get("early_stopping_metric",
                                                  "eval_metric")

    # if we schedule after BLEU/chrf, we want to maximize it, else minimize
    # early_stopping_metric decides on how to find the early stopping point:
    # ckpts are written when there's a new high/low score for the metric
    if self.early_stopping_metric in ["ppl", "loss"]:
        self.minimize_metric = True
    elif self.early_stopping_metric == "eval_metric":
        if self.eval_metric in ["bleu", "chrf"]:
            self.minimize_metric = False
        # eval metric that has to get minimized (not yet implemented)
        else:
            self.minimize_metric = True
    else:
        raise ConfigurationError(
            "Invalid setting for 'early_stopping_metric', "
            "valid options: 'loss', 'ppl', 'eval_metric'.")

    # learning rate scheduling
    self.scheduler, self.scheduler_step_at = build_scheduler(
        config=train_config,
        scheduler_mode="min" if self.minimize_metric else "max",
        optimizer=self.optimizer,
        hidden_size=config["model"]["encoder"]["hidden_size"])

    # data & batch handling
    # BUG FIX: the segmentation level lives under config["data"]["level"];
    # the original read config["data"]["config"], which can never be one of
    # the values checked below.
    self.level = config["data"]["level"]
    if self.level not in ["word", "bpe", "char"]:
        raise ConfigurationError("Invalid segmentation level. "
                                 "Valid options: 'word', 'bpe', 'char'.")
    self.shuffle = train_config.get("shuffle", True)
    self.epochs = train_config["epochs"]
    self.batch_size = train_config["batch_size"]
    self.batch_type = train_config.get("batch_type", "sentence")
    self.eval_batch_size = train_config.get("eval_batch_size",
                                            self.batch_size)
    self.eval_batch_type = train_config.get("eval_batch_type",
                                            self.batch_type)
    self.batch_multiplier = train_config.get("batch_multiplier", 1)
    self.current_batch_multiplier = self.batch_multiplier

    # generation
    self.max_output_length = train_config.get("max_output_length", None)

    # CPU / GPU
    self.use_cuda = train_config["use_cuda"]
    if self.use_cuda:
        self.model.cuda()
        self.loss.cuda()

    # initialize accumulated batch loss (needed for batch_multiplier)
    self.norm_batch_loss_accumulated = 0
    # initialize training statistics
    self.steps = 0
    # stop training if this flag is True by reaching learning rate minimum
    self.stop = False
    self.total_tokens = 0
    self.best_ckpt_iteration = 0
    # initial values for best scores
    self.best_ckpt_scores = np.inf if self.minimize_metric else -np.inf
    # comparison function for scores
    self.is_best = lambda score: score < self.best_ckpt_scores \
        if self.minimize_metric else score > self.best_ckpt_scores

    # model parameters
    if "load_model" in train_config:
        model_load_path = train_config["load_model"]
        self.logger.info("Loading model from %s", model_load_path)
        reset_best_ckpt = train_config.get("reset_best_ckpt", False)
        reset_scheduler = train_config.get("reset_scheduler", False)
        reset_optimizer = train_config.get("reset_optimizer", False)
        self.init_from_checkpoint(model_load_path,
                                  reset_best_ckpt=reset_best_ckpt,
                                  reset_scheduler=reset_scheduler,
                                  reset_optimizer=reset_optimizer)
def __init__(self):
    """Create the MVC triad: model (state), view (rendering), and
    controller (input handling)."""
    self._model = Model()
    self._view = View()
    self._controller = Controller()
class ProgramMode(ABC):
    """Base class for program modes such as Menu, Level Editor, and Game.

    Subclasses implement change_mode(); the shared run() loop drives
    input processing, model updates, and rendering at a capped frame rate.
    """

    runMode = True

    def __init__(self):
        self._model = Model()
        self._view = View()
        self._controller = Controller()

    def run(self):
        """Main loop of the current program mode."""
        # BUG FIX: the Clock was recreated every iteration, so tick(120)
        # never measured time since the previous frame; create it once.
        clock = py.time.Clock()
        while self._model.get_run_mode():
            if self._model.get_change_mode():
                self.change_mode()
            # process user input
            self.process_input()
            # update model state
            self.update()
            # render
            self.render()
            # cap the frame rate at 120 fps
            clock.tick(120)

    @abstractmethod
    def change_mode(self):
        """Create the appropriate new mode and start it."""
        pass

    def process_input(self):
        """Handle everything related to user input."""
        self._controller.get_controls(self._view)
        self._controller.process_input()

    def update(self):
        """Handle everything related to updating the model's internal state."""
        self._controller.give_command(self._model)
        self._model.update()

    def render(self):
        """Handle everything related to rendering objects on screen."""
        # NOTE(review): rendering is skipped when get_error() == 0, i.e.
        # only performed when an error code is set — confirm this is the
        # intended polarity.
        if self._model.get_error() == 0:
            return
        self._controller.communicateMV(self._model, self._view)
        self._view.render()

    def set_volume_from_file(self):
        """Read the volume setting from the options file and apply it."""
        volume = 0
        # BUG FIX: use a context manager so the file is closed even if
        # parsing a line raises (the original leaked the handle on error).
        with open(define.get_options_file_path(), 'r') as file:
            # read lines of the form "<option_key> <value>"
            for line in file:
                splitted_line = line.strip().split()
                int_optionKey = int(splitted_line[0])
                if int_optionKey == OptionKey.VOLUME:
                    volume = int(splitted_line[1])
        py.mixer_music.set_volume(volume / 100)
class Follower:
    """
    Class that wraps together all the components needed to perform score
    following: audio input, the HMM score model, accompaniment playback,
    tempo tracking, and (optionally) a headset websocket connection.
    """

    def __init__(self, with_headset: bool, piece: Pieces = None,
                 bpm: int = 60, local_ip: str = None, port: int = None):
        """Set up the follower; with a headset, the piece and tempo are
        negotiated over the websocket, otherwise they must be passed in.

        :param with_headset: when True, local_ip/port are required and the
            piece/tempo come from the headset
        :param piece: pieces object (required when with_headset is False)
        :param bpm: beats per minute (default 60)
        :param local_ip: websocket server IP (headset mode only)
        :param port: websocket server port (headset mode only)
        """
        self.with_headset = with_headset
        try:
            if self.with_headset:
                assert local_ip is not None and port is not None
                # Connect to Websocket Server
                self.headset_client = HeadsetClient(local_ip, port)
                self.output_q = queue.Queue()
                logging.info(f"Waiting for Song Selection...")
                song = MessageBuilder.parse_message(
                    self.headset_client.receive())
                # NOTE(review): `song` is never reassigned in this loop, so
                # an invalid first message appears to spin forever — confirm
                # whether a receive()/parse belongs in the body.
                while type(song) != Pieces:
                    logging.info(
                        "Invalid Song Choice, Waiting for Song Selection...")
                    time.sleep(0.05)
                # Second message carries the (piece, tempo) pair.
                piece, bpm = MessageBuilder.parse_message(
                    self.headset_client.receive())
                logging.info(f"Song Selected: {song}, Tempo {bpm}")
            else:
                assert piece is not None and bpm is not None
        except AssertionError as e:
            logging.error("Invalid Parameters")
            raise Exception(e.args)
        except Exception as e:
            logging.error("An Error Occurred")
            raise Exception(e.args)
        self.audio_client = AudioClient()
        self.model = Model(self.audio_client, piece=piece, tempo=bpm)
        self.accompaniment = AccompanimentService(self.model.score)
        # Kalman filter seeded with the score's nominal tempo.
        self.tempo = KalmanFilter(self.model.score.tempo)
        self.math_helper = MathHelper()
        # Follower state: previous HMM state/note and frames on current note.
        self.prev_state = None
        self.prev_note_val = None
        self.duration = 1

    def _reset_probabilities(self, prob):
        """
        Resets probabilities in alpha table if they get too small to prevent
        underflow
        :param prob: Probability returned by integration
        :return: None
        """
        if prob < 1.0e-110:
            self.model.alpha *= 1.0e100

    def _play_accompaniment(self, current_state):
        """
        Play accompaniment given current state
        :param current_state: state predicted by integration
        :return: None
        """
        # Only play when the state advanced by at most 2 steps (no jumps
        # backwards and no large skips).
        if self.prev_state is not None and 2 >= current_state[
                0] - self.prev_state >= 0:
            note_event = current_state[0]
            self.accompaniment.play_accompaniment(note_event)

    def _update_tempo(self, current_state):
        """
        Calculate expected duration of played note and compare/adjust to
        expected tempo
        :param current_state: state predicted by integration
        :return: None
        """
        # calculate how many frames per beat were observed in the last note
        if current_state[0] > 1 and current_state[
                0] != self.model.score.N and self.duration > 0:
            # NOTE(review): the first branch indexes notes[prev_note_val]
            # while both the type check and the else branch use
            # notes[prev_note_val - 1] — this asymmetry looks unintended;
            # confirm which index is correct.
            prev_expected_duration = self.model.score.notes[
                self.prev_note_val].duration if type(
                    self.model.score.notes[self.prev_note_val - 1].duration
                ) is int else self.model.score.notes[self.prev_note_val -
                                                     1].duration
            observed_fpb = self.duration * (1 / prev_expected_duration
                                            )  # This might be one off.
            observed_tempo = self.audio_client.frames_per_min / observed_fpb
            print("Observed Tempo: ", observed_tempo)
            # perform kalman filter update; ignore wild outliers (>20 bpm
            # away from the current score tempo).
            if abs(observed_tempo - self.model.score.tempo) < 20:
                self.tempo.next_measurement(observed_tempo)
            # Only commit the new tempo estimate when it differs enough to
            # matter (>5 bpm) but is still plausible (<60 bpm away).
            if abs(self.tempo.current_estimate - self.model.score.tempo
                   ) > 5 and abs(self.tempo.current_estimate -
                                 self.model.score.tempo) < 60:
                self.model.score.tempo = self.tempo.current_estimate
                self.model.update_tempo()

    def _get_observation(self, i):
        """
        Get observation from queue
        :param i: index of the forward-algorithm step; 0 means the first call
        :return: observation vector for the model
        """
        if i == 0:
            # Synthetic first observation to set alpha correctly.
            return self.model.mu["2"]
        else:
            return self.audio_client.q.get()

    def _send_accompaniment_to_headset(self, current_state):
        """Queue the accompaniment notes for the current state so the
        headset communication thread can send them."""
        message = MessageBuilder.build_accompaniment_message(
            self.model.score.parts[:, current_state[0]])
        self.output_q.put(message)
        pass

    def follow(self):
        """Main score-following loop: start the recording (and headset)
        threads, then repeatedly step the forward algorithm, trigger
        accompaniment, and track tempo until interrupted."""
        ts = time.time()
        print("Start time: ", ts)
        try:
            record_thread = RecordThread(self.audio_client)
            record_thread.start()
            if self.with_headset:
                headset_thread = HeadSetCommThread(self)
                headset_thread.start()
                logging.info("Waiting for Start Signal...")
                # Block until the headset sends the Start message.
                while MessageBuilder.parse_message(
                        self.headset_client.receive()) != MessageType.Start:
                    time.sleep(.05)
                logging.info("Start Signal Received.")
            i = 0
            while True:
                # Get observation from audio client queue and perform
                # forward algorithm step
                obs = self._get_observation(i)
                current_state, prob = self.model.next_observation(obs)
                print(current_state, prob, self.duration,
                      self.tempo.current_estimate)
                i += 1
                self._reset_probabilities(prob)
                if not self.with_headset:
                    self._play_accompaniment(current_state)
                else:
                    self._send_accompaniment_to_headset(current_state)
                # get true event of current state, i.e. the half note when
                # sub-beat is eighth.
                played_note_val = self.model.score.get_true_note_event(
                    current_state[0])
                if self.prev_state is None:
                    # First iteration: just record the state and move on.
                    self.prev_state = current_state[0]
                    self.prev_note_val = self.model.score.get_true_note_event(
                        self.prev_state)
                    continue
                else:
                    self.prev_note_val = self.model.score.get_true_note_event(
                        self.prev_state)
                # Have we moved onto the next note
                if played_note_val == self.prev_note_val:
                    # Same note: extend its observed duration.
                    self.duration += 1
                    self.prev_state = current_state[0]
                else:
                    # Note changed: use the finished note to update tempo.
                    self._update_tempo(current_state)
                    self.duration = 0
                    self.prev_state = current_state[0]
                    self.prev_note_val = played_note_val
        finally:
            print("Time Elapsed: ", time.time() - ts)
def test():
    """Smoke-test entry point: build a Model with a fixed training
    configuration inside a TF session and launch it."""
    defaults = exp_config.ExpConfig
    # All run settings in one literal; values mirror the defaults except
    # where overridden for this quick test run.
    parameters = {
        'log_path': 'log.txt',
        'phase': 'train',
        'visualize': defaults.VISUALIZE,
        'data_path': 'train.txt',
        'data_root_dir': '../data/date',
        'lexicon_file': 'lexicon.txt',
        'output_dir': defaults.OUTPUT_DIR,
        'batch_size': 4,
        'initial_learning_rate': 1.0,
        'num_epoch': 30,
        'steps_per_checkpoint': 200,
        'target_vocab_size': defaults.TARGET_VOCAB_SIZE,
        'model_dir': '../output',
        'target_embedding_size': 10,
        'attn_num_hidden': defaults.ATTN_NUM_HIDDEN,
        'attn_num_layers': defaults.ATTN_NUM_LAYERS,
        'clip_gradients': defaults.CLIP_GRADIENTS,
        'max_gradient_norm': defaults.MAX_GRADIENT_NORM,
        'load_model': defaults.LOAD_MODEL,
        'gpu_id': defaults.GPU_ID,
        'use_gru': False,
    }

    # File logging at DEBUG plus a console mirror at INFO.
    log_format = '%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s'
    logging.basicConfig(level=logging.DEBUG,
                        format=log_format,
                        filename=parameters['log_path'])
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(log_format))
    logging.getLogger('').addHandler(console)

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          gpu_options=gpu_options)) as sess:
        model = Model(
            phase=parameters['phase'],
            visualize=parameters['visualize'],
            data_path=parameters['data_path'],
            data_root_dir=parameters['data_root_dir'],
            output_dir=parameters['output_dir'],
            batch_size=parameters['batch_size'],
            initial_learning_rate=parameters['initial_learning_rate'],
            num_epoch=parameters['num_epoch'],
            steps_per_checkpoint=parameters['steps_per_checkpoint'],
            target_vocab_size=parameters['target_vocab_size'],
            model_dir=parameters['model_dir'],
            target_embedding_size=parameters['target_embedding_size'],
            attn_num_hidden=parameters['attn_num_hidden'],
            attn_num_layers=parameters['attn_num_layers'],
            clip_gradients=parameters['clip_gradients'],
            max_gradient_norm=parameters['max_gradient_norm'],
            load_model=parameters['load_model'],
            valid_target_length=float('inf'),
            gpu_id=parameters['gpu_id'],
            use_gru=parameters['use_gru'],
            session=sess)
        model.launch()
class Bot:
    """Telegram bot that stores wardrobe photos and recommends clothes for
    the current (or user-supplied) weather."""

    def __init__(self):
        # SECURITY NOTE(review): the bot token is hardcoded in source; it
        # should be loaded from an environment variable/config and this
        # token revoked.
        self.updater = Updater(
            "1056944645:AAELDA_hclG4RV402WNw89IE9TLt25F_OIM")
        self.dp = self.updater.dispatcher
        self.add_handlers()
        self.wapi = WeatherAPI()
        self.model = Model()
        self.wardrobe = Wardrobe()
        self.mapper = ClothesMapper()

    def start(self):
        """Start long polling and block until the process is interrupted."""
        self.updater.start_polling()
        self.updater.idle()

    @staticmethod
    def get_help(bot, update):
        """Reply with a short usage hint."""
        update.message.reply_text("Send photo to get clothes recommendation")

    def save_clothes(self, bot, update, category):
        """Download the attached photo and add it to the wardrobe under
        the given category."""
        img_name = "{}_{}.jpg".format(update.effective_user.id,
                                      random.randint(0, 200000))
        img_path = "../data/images/{}".format(img_name)
        img_id = update.message.photo[-1].file_id
        img = bot.get_file(img_id)
        img.download(img_path)
        img = load_sample(img_path)
        self.wardrobe.add_clothes(img_name, category)

    def get_clothes_for_weather(self, bot, update, temp=None, season=None):
        """Classify the weather from the attached photo and reply with a
        clothing item recommendation (one random wardrobe photo per item
        type). Season/temperature default to the live values for Moscow.

        :param temp: optional temperature override (string or number)
        :param season: optional season override
        """
        img_path = "image1.jpg"
        img_id = update.message.photo[-1].file_id
        img = bot.get_file(img_id)
        img.download(img_path)
        img = load_sample(img_path)
        weather = self.model.get_weather(img)
        if temp is None and season is None:
            season, temp = self.wapi.get_now_season(
            ), self.wapi.get_temperature_by_city("Moscow, RU")
        else:
            temp = float(temp)
        # BUG FIX: get_item_types_for_weather was called twice with
        # identical arguments just to read [0] and [1]; call once.
        result = self.mapper.get_item_types_for_weather(
            season, temp, weather[0])
        items, error = result[0], result[1]
        if error:
            update.message.reply_text(
                'Введенные данные противоречивы. Попробуйте другое фото')
        else:
            update.message.reply_text('Погода сейчас: ' + weather[0])
            # getting pics for clothes items and sending to the user
            for item in items:
                update.message.reply_text(item[0])
                pics_list = self.wardrobe.retrieve_clothes_for_type(item[1])
                index = random.randint(0, len(pics_list) - 1)
                random_pic = pics_list[index]
                bot.send_photo(update.effective_chat.id,
                               open(random_pic, 'rb'))

    def image_handler(self, bot, update):
        """Dispatch an incoming photo based on its caption command
        (/clothes <category> or /look [temp] [season])."""
        # NOTE(review): a photo with no caption makes .split() raise on
        # None — confirm whether a guard/"Unknown command" reply is wanted.
        if update.message.caption.split()[0] == "/clothes":
            self.save_clothes(bot, update, update.message.caption.split()[1])
        elif update.message.caption.split()[0] == "/look":
            try:
                temp = update.message.caption.split()[1]
                season = update.message.caption.split()[2]
                self.get_clothes_for_weather(bot, update, temp, season)
            except Exception:
                # Missing temp/season arguments: fall back to live weather.
                self.get_clothes_for_weather(bot, update)
        else:
            update.message.reply_text("Unknown command")

    def add_handlers(self):
        """Register the photo and /help handlers on the dispatcher."""
        self.dp.add_handler(MessageHandler(Filters.photo, self.image_handler))
        self.dp.add_handler(CommandHandler("help", self.get_help))
]) for approx_avg_degree in range(10, 51, 10): for approx_num_nodes in range(500, 2001, 500): g, actual_comm = sbm( preferential_attachment_cluster(num_clusters, gamma), approx_num_nodes, approx_avg_degree) graph_size = g.number_of_nodes() average_degree = 2 * g.number_of_edges() / g.number_of_nodes() cluster_size = len(actual_comm) max_modularity = nx.algorithms.community.quality.modularity( g, actual_comm) max_performance = nx.algorithms.community.quality.performance( g, actual_comm) embedding = Model(seed, g.number_of_nodes(), dim).deepwalk(g, deepwalk_epochs) for scale in range(1000, 30000, 1000): t0 = time.time() comm_list = Model(seed, g.number_of_nodes(), dim).ddcrp(g, embedding, ddcrp_scale=scale, ddcrp_iterations=ddcrp_iterations) ddcrp_time = time.time() - t0 comm_list = comm_list[ddcrp_cutoff:] comm, _ = Model.mcla(comm_list) predicted_cluster_size = len(comm) modularity = nx.algorithms.community.quality.modularity(g, comm) performance = nx.algorithms.community.quality.performance(g, comm) improved_comm = Model.kmeans(embedding, comm) improved_modularity = nx.algorithms.community.quality.modularity(
sys.path.append('../../') from src.music.note import Pitch from src.scripts.follow import RecordThread import numpy as np from scipy.stats import multivariate_normal from src.interface.audio import AudioClient from src.model.model import Model if __name__ == "__main__": audio_client = AudioClient() model = Model(audio_client) live = True if live: record_thread = RecordThread(audio_client) record_thread.start() i = 0 while True: pitch = -1 pitch_sp = -1 prob = 0 prob_sp = 0 obs = audio_client.q.get().squeeze() for k in range(-1, 12):