def document_message(msg):
    """Log an incoming Telegram document and download it to ./documents/.

    msg: Telegram message object carrying a ``document`` attachment.
    """
    logger.info("{:s}: [DOCUMENT] {:s}".format(msg.from_user.first_name,
                                               msg.document.file_id))
    # exist_ok=True replaces the old try/except FileExistsError dance.
    os.makedirs("./documents", exist_ok=True)
    file_info = bot.get_file(msg.document.file_id)
    utils.file_download(file_info, './documents/')
def send_message(message):
    """Relay a user's text or photo message to the configured target chat.

    Text containing '/cancel' triggers a cancel notice back to the sender;
    any other text is forwarded to ``config.send_chat_id`` with a
    confirmation reply. Photos are downloaded locally first, then re-sent
    to the target chat.
    """
    if message.text:
        if '/cancel' in message.text:
            bot.send_message(message.chat.id,
                             ru_strings.CANCEL_MESSAGE['strings'][0],
                             parse_mode='Markdown')
        else:
            bot.send_message(config.send_chat_id, message.text,
                             parse_mode='Markdown')
            logger.info("Sending message {:s} to chat {:d}".format(
                message.text, config.send_chat_id))
            bot.send_message(message.chat.id,
                             ru_strings.SEND_MSG_MESSAGE['strings'][1],
                             parse_mode='Markdown')
    elif message.photo:
        os.makedirs("./photos", exist_ok=True)
        # The last PhotoSize entry is the highest-resolution variant.
        file_id = message.photo[-1].file_id
        file_info = bot.get_file(file_id)
        file_patch = utils.file_download(file_info, './photos/')
        with open(file_patch, 'rb') as photo:
            bot.send_photo(config.send_chat_id, photo)
        logger.info("Sending photo {:s} to chat {:d}".format(
            file_id, config.send_chat_id))
        bot.send_message(message.chat.id,
                         ru_strings.SEND_MSG_MESSAGE['strings'][1],
                         parse_mode='Markdown')
def photo_receive(message):
    """Handle an incoming chat photo.

    Replies with image-recognition concepts when the caption matches the
    "персик" trigger word (original photos only, not forwards), and bans
    the sender for two minutes when the photo is classified as NSFW
    (unless ``allow_nsfw`` is set).
    """
    # Last PhotoSize entry = highest resolution.
    file_id = message.photo[-1].file_id
    if message.caption and message.forward_from is None:
        # Raw string: '\W' is an invalid escape in a plain str literal and
        # only matched by deprecation grace.
        if re.match(r'(?i)(\W|^).*?!п[eеэ][pр][cс](и|ч[eеи]к).*?(\W|$)',
                    message.caption):
            bot.reply_to(message,
                         "".join([
                             message.from_user.first_name,
                             picturedetect.reply_get_concept_msg(file_id)
                         ]),
                         parse_mode='Markdown')
            logger.info("Photo by Username @{:s} | ID {:s}".format(
                (message.from_user.username or "NONE"), file_id))
    os.makedirs("./photos", exist_ok=True)
    file_patch = './photos/{:s}.jpg'.format(file_id)
    # Download only when the photo is not already cached on disk.
    if not Path(file_patch).is_file():
        file_info = bot.get_file(file_id)
        file_patch = utils.file_download(file_info, './photos/')
        if file_patch is None:
            logger.error("File download error!")
            bot.reply_to(message, ru_strings.SOME_ERROR_MESSAGE['strings'],
                         parse_mode='Markdown')
            bot.send_sticker(message.chat.id,
                             ru_strings.SOME_ERROR_MESSAGE['stickers'][0])
            return
    if not allow_nsfw:
        if picturedetect.nsfw_test(file_patch, 0.75):
            bot.delete_message(message.chat.id, message.message_id)
            bot.send_message(
                message.chat.id,
                "*{} уходит в бан на {} {}! Причина: NSFW*".format(
                    message.from_user.first_name, 2, 'мин.'),
                parse_mode='Markdown')
            ban_user(message.chat.id, message.from_user.id, 2 * 60)
def load_submission(self, event):
    """Listbox callback: download the selected submission's audio tracks
    and display its pre-rendered feedback plot.

    event: tkinter event object (unused; required by the bind signature).
    """
    sound_id = self.submissions_listbox.get(tk.ANCHOR)
    submission = utils.get_student_submission_by_sound_id(
        sound_id, self.submissions)
    if submission is None:
        return
    version = submission['version']
    self.backing_track = utils.get_backing_track(self.exercise, version)
    self.reference_track = utils.get_reference_track(self.exercise, version)
    # Replace the track metadata dicts with local file paths once downloaded.
    self.student_track = utils.file_download(
        download_url=submission['download_url'], filetype='submissions')
    self.backing_track = utils.file_download(
        download_url=self.backing_track['download_url'],
        filetype='backing_tracks')
    self.reference_track = utils.file_download(
        download_url=self.reference_track['download_url'],
        filetype='reference_tracks')
    # splitext is safer than slicing off a fixed 4-char extension.
    base_name = os.path.splitext(os.path.basename(self.student_track))[0]
    image_file = os.path.join(PLOTS_PATH, base_name + '.png')
    render = ImageTk.PhotoImage(Image.open(image_file))
    self.img.configure(image=render)
    # Keep a Python-side reference so Tk does not garbage-collect the image.
    self.img.image = render
    return
def extract_submission_features(exercise, submission_sound, plots_path=None):
    """Download the reference and student audio for one submission and
    compute singing features with the MAST method.

    Returns the feature dict with the local student file name attached
    under ``'file_name'``.
    """
    version = submission_sound['version']
    annotation_csv = os.path.join(
        'reference_tracks',
        '{}_{}_ref_segments.csv'.format(exercise['name'], version))
    reference = utils.get_reference_track(exercise, version)
    reference_audio = utils.file_download(
        download_url=reference['download_url'],
        filetype='reference_tracks')
    student_audio = utils.file_download(
        download_url=submission_sound['download_url'],
        filetype='submissions')
    features = notes_singing.get_features_from_student_audio(
        reference_audio, student_audio, annotation_csv,
        plots_path=plots_path, method='MAST')
    features['file_name'] = student_audio
    return features
def reply_get_concept_msg(photo_id):
    """Build a reply message describing the concepts recognized in a photo.

    Downloads the photo into ./photos/ unless it is already cached there,
    analyses it, and keeps the top ``config.CONCEPTS_COUNT`` concepts.
    """
    # exist_ok=True replaces the old try/except FileExistsError dance.
    os.makedirs("./photos", exist_ok=True)
    file_patch = './photos/{:s}.jpg'.format(photo_id)
    if not Path(file_patch).is_file():
        file_info = utils.bot.get_file(photo_id)
        file_patch = utils.file_download(file_info, './photos/')
    # islice keeps the analyser lazy — only the first N concepts are pulled.
    concepts = itertools.islice(analise_photo(file_patch),
                                config.CONCEPTS_COUNT)
    message, word_sets = process_concepts(concepts)
    logger.info("[WHATISTHIS] Photo ID {} - [{}]".format(
        photo_id, "|".join(word_sets)))
    return message
print(len(annotated_files)) for f in annotated_files: sound_id = os.path.basename(f).split('_')[0] # print(sound_id) for af in audio_files: # print(af) if sound_id in os.path.basename(af)[:4]: audio_file = os.path.basename(af) # print(sub_ref_map[audio_file]) break # print(audio_file) try: exercise = utils.get_exercise_by_name(sub_ref_map[audio_file]['exercise_name'], exercises) version = sub_ref_map[audio_file]['version'] ref = utils.get_reference_track(exercise, version) ref = utils.file_download(ref['download_url'], filetype='reference_tracks') # print(ref) std = os.path.join('submissions', audio_file) ref_audio = analysis.load_audio(ref) std_audio = analysis.load_audio(std) if series == 'hpcp': params = {'normalize_method':HPCP_NORMALIZE,'hpcp_size':HPCP_SIZE, 'dist_function':DIST_FUNCTION} ref_series = analysis.hpcp_extract(ref_audio,params['normalize_method'],params['hpcp_size']) std_series = analysis.hpcp_extract(std_audio,params['normalize_method'],params['hpcp_size']) if series =='pitch': params = {'dist_function':DIST_FUNCTION}
def extract_submission_features(exercise, submission):
    """Align one student submission against its reference track and extract
    per-segment features, saving a performance-visualization figure.

    Side effects: downloads audio, writes a figure into ``image_save_path``,
    and appends the feature dict (with grade and sound_id) to the
    module-level ``all_features`` list.
    """
    version = submission['version']
    segmentAnnotationFile = os.path.join(
        'reference_tracks',
        exercise['name'] + '_' + version + '_ref_segments.csv')
    reference_track = utils.get_reference_track(exercise, version)
    ref_audio_file = utils.file_download(
        download_url=reference_track['download_url'],
        filetype='reference_tracks')
    ref_audio = analysis.load_audio(audio_file=ref_audio_file)
    ref_hpcp_vector = analysis.hpcp_extract(audio=ref_audio,
                                            normalize_method=HPCP_NORMALIZE,
                                            hpcp_size=HPCP_SIZE)
    ref_pitch, ref_conf = analysis.pitch_extractor(ref_audio)

    # Only the first sound of the submission is analysed.
    submission_sound = submission['sounds'][0]
    sound_id = str(submission_sound['id'])
    std_audio_file = utils.file_download(
        download_url=submission_sound['download_url'],
        filetype='submissions')
    print(std_audio_file)
    std_audio = analysis.load_audio(audio_file=std_audio_file)
    std_pitch, std_conf = analysis.pitch_extractor(std_audio)
    std_hpcp_vector = analysis.hpcp_extract(audio=std_audio,
                                            normalize_method=HPCP_NORMALIZE,
                                            hpcp_size=HPCP_SIZE)

    # DTW alignment of the HPCP chroma sequences.
    cost, path, matrix = analysis.audio_align(ref_hpcp_vector,
                                              std_hpcp_vector)
    plotter.plot_dtw_alignment(ref_audio, std_audio, path,
                               save_file_name=Path(std_audio_file).stem)
    ref_time_ticks = analysis.get_annotation(segmentAnnotationFile)
    std_time_ticks_dtw = analysis.get_std_time_ticks_dtw(ref_time_ticks,
                                                         path)
    figure = plotter.performance_visualize(analysis, ref_audio, std_audio,
                                           ref_pitch, std_pitch,
                                           ref_time_ticks,
                                           std_time_ticks_dtw)
    figure.savefig(fname=os.path.join(image_save_path, sound_id))
    plt.close()
    try:
        features = analysis.features_extract(segmentAnnotationFile, path,
                                             ref_pitch, std_pitch)
        # Submissions without a recorded grade default to '0'.
        if sound_id in grades:
            features['grade'] = grades[sound_id]
        else:
            features['grade'] = '0'
        features['sound_id'] = sound_id
        all_features.append(features)
    except Exception:
        # Narrowed from a bare except; still best-effort per submission —
        # just report which sound failed and carry on.
        print(sound_id)
    # Audio buffers are large; reclaim them before the next submission.
    gc.collect()
    return
"""Fetch all exercise data for the configured context and download every
backing track, reference track, and student submission sound file."""
import settings
import utils
import mcclient
import json

exercises = mcclient.get_full_context(settings.CONTEXT_ID)['exercises']

# Snapshot the exercise metadata alongside the downloaded audio.
with open('data_{}.json'.format(settings.CONTEXT_ID), 'w') as fp:
    json.dump(exercises, fp)

for exercise in exercises:
    for backing_track in exercise['backing_tracks']:
        utils.file_download(download_url=backing_track['download_url'],
                            filetype='backing_tracks')
    for reference_track in exercise['reference_tracks']:
        utils.file_download(download_url=reference_track['download_url'],
                            filetype='reference_tracks')
    submissions = utils.get_submissions_in_exercise(exercise)
    for submission in submissions:
        for sound_file in submission['sounds']:
            utils.file_download(download_url=sound_file['download_url'],
                                filetype='submissions')