# Standard-library imports used by the functions below (assumed to sit at the
# top of this module, alongside the project helpers it calls: get_num_files,
# makedir, make_track, convert_mp3, sub_directory, _get_name, get_name,
# get_prefix, shuffle_track, generate_from_list_of_files, print_log, OUTPUT_ALL).
import math
import os
from random import shuffle
from shutil import rmtree


def create_overview(files, start, end, num_files_per_group=8, type_output='B',
                    to_mp3=False, artist='Glossika', album='Glossika Training',
                    prefix=''):
    if num_files_per_group == 0:
        num_files_per_group = get_num_files(len(files))

    result = []
    old_start = start
    old_end = end
    # Re-index relative to `files`: start/end become list indices, while
    # old_start/old_end keep the original track numbers for naming.
    start = 0
    end = old_end - old_start + 1
    # Split `files` into groups and append each group twice, so every group
    # plays back to back in the output track.
    for i in range(math.ceil(end / num_files_per_group)):
        sub_list = files[start:min(start + num_files_per_group, end)]
        result = result + sub_list + sub_list
        start = start + num_files_per_group

    type_num = '1' if type_output == 'B' else '2'
    dir_name = OUTPUT_ALL + '(wav)/' + sub_directory()
    name = _get_name(prefix, type_num, dir_name, old_start, old_end)
    makedir(dir_name)
    make_track(result, name)
    convert_mp3(to_mp3, name, dir_name.replace('wav', 'mp3'), artist, album)
    print('Shuffle Files: Done')
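# Example call (an illustrative sketch; `wav_files` and the 51-100 range are
# hypothetical, not from this repo): build an overview track that plays each
# group of 8 files twice.
# create_overview(files=wav_files, start=51, end=100,
#                 num_files_per_group=8, type_output='B', to_mp3=True)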
def create_accent_grammar(list_of_tracks, num_files_per_group, num_plays,
                          num_copies=1, prefix='', to_mp3=False,
                          artist='Accent', album='Accent Training',
                          shuffled='', grammar=False):
    type_file = 'Accent' if not grammar else 'Grammar'
    artist = 'Accent' if not grammar else 'Grammar'
    album = 'Accent Training' if not grammar else 'Grammar Training'

    # Collect the source audio files for each requested track number.
    input_files = []
    for track in list_of_tracks:
        sub_input_files = []
        for f in sorted(os.listdir(type_file + '/' + type_file + 'EN/')):
            if not (f[-3:] == 'mp3' or f[-3:] == 'wav'):
                continue
            # The 3-digit track number sits at a fixed offset in the file name.
            if grammar:
                u = f[1:4]
            else:
                u = f[6:9]
            if u == '%03d' % track:
                sub_input_files.append(type_file + '/' + type_file + 'EN/' + f)
        if shuffled == 'group':
            shuffle(sub_input_files)
        input_files.extend(sub_input_files)
    if shuffled == 'all':
        shuffle(input_files)

    if prefix == '' or prefix is None:
        prefix = get_prefix(list_of_tracks, grammar)
    if num_files_per_group == 0:
        num_files_per_group = get_num_files(len(input_files))

    generate_from_list_of_files(input_files,
                                type_file + '/' + type_file + 'VN/',
                                type_file, False)
    files = [
        'output' + type_file + '/' + f.split('/')[-1][:-6] + type_file
        + f.split('/')[-1][-4:]
        for f in input_files
    ]

    # Shuffle files and render one output track per copy.
    for copies in range(int(num_copies)):
        result = shuffle_track(files, num_plays, num_files_per_group)
        dir_name = OUTPUT_ALL + '(wav)/' + sub_directory()
        makedir(dir_name)
        name = get_name(dir_name, prefix, num_plays)
        make_track(result, name)
        convert_mp3(to_mp3, name, dir_name.replace('wav', 'mp3'), artist, album)

    rmtree('output' + type_file)
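# Example call (illustrative only; the track numbers are made up): render two
# shuffled copies covering Accent tracks 1-3, with files shuffled within each
# track's group before interleaving.
# create_accent_grammar(list_of_tracks=[1, 2, 3], num_files_per_group=8,
#                       num_plays=3, num_copies=2, shuffled='group',
#                       to_mp3=True, grammar=False)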
def create_review(files, start, end, num_plays, num_files_per_group,
                  log=False, log_tracks=0, num_copies=1, to_mp3=False,
                  artist='Glossika', album='Glossika Training', name=None):
    '''
    Combine files into tracks for Glossika training.

    num_plays: each file is played num_plays times
    num_files_per_group: the number of files per group in the output track
    start: start track number
    end: end track number
    log: set True to print debug information
    log_tracks: used in debug mode
    num_copies: number of copies of the output file
    to_mp3: set True to convert the output file to .mp3
    artist, album: if to_mp3=True, use these values to set meta information
    '''
    makedir('outputB')
    # if shuffled == 'all': shuffle(files)
    if num_files_per_group == 0:
        num_files_per_group = get_num_files(len(files))

    # Shuffle files and render one output track per copy.
    prefix = 'Review_%04d_%04d' % (start, end)
    if name is not None and name != '':
        prefix = name
    for copies in range(int(num_copies)):
        result = shuffle_track(files, num_plays, num_files_per_group)
        dir_name = OUTPUT_ALL + '(wav)/' + sub_directory()
        makedir(dir_name)
        name = get_name(dir_name, prefix, num_plays)
        make_track(result, name)
        convert_mp3(to_mp3, name, dir_name.replace('wav', 'mp3'), artist, album)
        print_log(log, log_tracks, result)
    print('Shuffle Files: Done')
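# Example call (a sketch; `wav_files` and the 1-50 range are assumptions):
# review sentences 1-50, each file played twice in groups of 10, with an
# .mp3 copy of the output.
# create_review(wav_files, start=1, end=50, num_plays=2,
#               num_files_per_group=10, num_copies=1, to_mp3=True)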
finetune_model = utils.build_finetune_model(base_model,
                                            dropout=args.dropout,
                                            fc_layers=FC_LAYERS,
                                            num_classes=len(class_list))

if args.continue_training:
    finetune_model.load_weights("./checkpoints/" + args.model + "_model_weights.h5")
    print("Weights loaded successfully.")

adam = Adam(lr=0.00001)
finetune_model.compile(adam, loss='categorical_crossentropy', metrics=['accuracy'])

num_train_images = utils.get_num_files(TRAIN_DIR)
num_val_images = utils.get_num_files(VAL_DIR)


def lr_decay(epoch):
    # Halve the learning rate every 20 epochs.
    if epoch % 20 == 0 and epoch != 0:
        lr = K.get_value(finetune_model.optimizer.lr)
        K.set_value(finetune_model.optimizer.lr, lr / 2)
        print("LR changed to {}".format(lr / 2))
    return K.get_value(finetune_model.optimizer.lr)


learning_rate_schedule = LearningRateScheduler(lr_decay)

filepath = "./checkpoints/" + args.model + "_model_weights.h5"
checkpoint = ModelCheckpoint(filepath, monitor="acc", verbose=1,
finetune_model = utils.build_finetune_model(base_model,
                                            dropout=args.dropout,
                                            fc_layers=FC_LAYERS,
                                            num_classes=len(class_list))

if args.continue_training:
    finetune_model.load_weights("./checkpoints/" + args.model + "_model_weights.h5")

adam = Adam(lr=0.00001)
finetune_model.compile(adam, loss='categorical_crossentropy', metrics=['accuracy'])

num_train_images = utils.get_num_files(BASE_IMG_DIR + TRAIN_DIR)
num_val_images = utils.get_num_files(BASE_IMG_DIR + VAL_DIR)


def lr_decay(epoch):
    # Halve the learning rate every 20 epochs.
    if epoch % 20 == 0 and epoch != 0:
        lr = K.get_value(finetune_model.optimizer.lr)
        K.set_value(finetune_model.optimizer.lr, lr / 2)
        print("LR changed to {}".format(lr / 2))
    return K.get_value(finetune_model.optimizer.lr)


learning_rate_schedule = LearningRateScheduler(lr_decay)

filepath = "./checkpoints/" + args.model + "_model_weights.h5"
checkpoint = ModelCheckpoint(filepath, monitor="acc", verbose=1,
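# The ModelCheckpoint call above is truncated in this snippet. A hedged sketch
# of how it would typically be completed and wired into training follows;
# train_generator, val_generator, BATCH_SIZE, and NUM_EPOCHS are assumptions,
# not names taken from this file.
# checkpoint = ModelCheckpoint(filepath, monitor="acc", verbose=1,
#                              save_best_only=True, mode="max")
# finetune_model.fit_generator(
#     train_generator,
#     steps_per_epoch=num_train_images // BATCH_SIZE,
#     epochs=NUM_EPOCHS,
#     validation_data=val_generator,
#     validation_steps=num_val_images // BATCH_SIZE,
#     callbacks=[checkpoint, learning_rate_schedule])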