def generate_dataframes(preprocessed_dir):
    """Build train/test feature DataFrames for every piece under preprocessed_dir.

    Walks ``preprocessed_dir`` (one subdirectory per composer), parses each
    piece with music21, evaluates every feature named in
    ``features_lib.FEATURES`` on it, and splits each composer's examples
    into train/test via ``split_arr``. Pieces with fewer than ``MIN_NOTES``
    notes are skipped entirely.

    Returns:
        (train_df, test_df): two pandas DataFrames with one row per piece,
        columns "composer", "filename", plus one column per feature.
    """
    examples_train = []
    examples_test = []
    for composer in os.listdir(preprocessed_dir):
        composer_dir = os.path.join(preprocessed_dir, composer)
        composer_examples = []
        for piece_name in os.listdir(composer_dir):
            piece_obj = music21.converter.parse(os.path.join(composer_dir, piece_name))
            # Skip pieces too short to yield meaningful feature values.
            if len(util.get_notes(piece_obj)) < MIN_NOTES:
                continue
            item = {"composer": composer, "filename": piece_name}
            for feature in features_lib.FEATURES:
                # Each feature name maps to a function of the same name.
                item[feature] = getattr(features_lib, feature)(piece_obj)
            composer_examples.append(item)
        # Split per composer — presumably so both sets contain every composer;
        # confirm against split_arr's contract.
        train_part, test_part = split_arr(composer_examples)
        examples_train.extend(train_part)
        examples_test.extend(test_part)
    return pandas.DataFrame(examples_train), pandas.DataFrame(examples_test)
def chromaticism(piece):
    """Return 1 if the piece contains a chromatic run, else 0.

    A chromatic run is three consecutive notes separated by two successive
    semitone steps in the same direction (ascending or descending).
    """
    pitches = [note.midi for note in util.get_notes(piece)]
    # Successive-interval pairs for every window of three notes.
    windows = (
        (pitches[i + 1] - pitches[i], pitches[i + 2] - pitches[i + 1])
        for i in range(len(pitches) - 2)
    )
    # (+1, +1) is an ascending run, (-1, -1) a descending one.
    return int(any(steps in ((1, 1), (-1, -1)) for steps in windows))
def Low_notes(piece):
    """Return the fraction of notes at or below MIDI pitch 64.

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        float in [0, 1]. Returns 0.0 for a piece with no notes — the
        original raised ZeroDivisionError (low + high == 0) in that case.
    """
    note_values = [note.midi for note in util.get_notes(piece)]
    if not note_values:
        # Empty piece: avoid dividing by zero.
        return 0.0
    # Threshold 64 (MIDI E4) splits "low" from "high"; notes strictly above
    # 64 count as high, everything else as low — same rule as the original.
    low = sum(1 for value in note_values if value <= 64)
    return low / len(note_values)
def Low_notes(piece):
    """Return the fraction of notes at or below MIDI pitch 64.

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        float in [0, 1]; 0.0 when the piece has no notes (the original
        implementation divided by zero in that case).
    """
    notes = util.get_notes(piece)
    total = len(notes)
    if total == 0:
        # Guard against ZeroDivisionError for empty pieces.
        return 0.0
    # bool sums as int: counts notes at or below MIDI 64 (E4).
    low_count = sum(note.midi <= 64 for note in notes)
    return low_count / total
def chromaticism(piece):
    """Return 1 if the piece contains a chromatic run of three notes, else 0.

    A run is two consecutive semitone steps in the same direction,
    either ascending or descending.
    """
    midi_vals = [note.midi for note in util.get_notes(piece)]
    # Slide a window of three consecutive pitches over the melody.
    for a, b, c in zip(midi_vals, midi_vals[1:], midi_vals[2:]):
        ascending = (b - a == 1) and (c - b == 1)
        descending = (a - b == 1) and (b - c == 1)
        if ascending or descending:
            return 1
    return 0
def leaps_ratio(piece):
    """Return the fraction of melodic intervals larger than a whole step.

    An interval of more than 2 semitones counts as a leap; anything else
    (including repeated notes) counts as a step.

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        float in [0, 1]; 0.0 when the piece has fewer than two notes —
        there are no intervals, and the original raised ZeroDivisionError.
    """
    note_values = [note.midi for note in util.get_notes(piece)]
    intervals = [abs(b - a) for a, b in zip(note_values, note_values[1:])]
    if not intervals:
        # Fewer than two notes: no intervals to classify.
        return 0.0
    leaps = sum(1 for distance in intervals if distance > 2)
    return leaps / len(intervals)
def leaps_ratio(piece):
    """Return the fraction of melodic intervals larger than a whole step.

    Intervals of more than 2 semitones are leaps; all others are steps.

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        float in [0, 1]; 0.0 for pieces with fewer than two notes (the
        original divided by zero since leaps + steps == 0).
    """
    pitches = [note.midi for note in util.get_notes(piece)]
    num_intervals = len(pitches) - 1
    if num_intervals < 1:
        # No intervals exist — avoid ZeroDivisionError.
        return 0.0
    leaps = 0
    for previous, current in zip(pitches, pitches[1:]):
        if abs(current - previous) > 2:
            leaps += 1
    return leaps / num_intervals
def range_melody(piece):
    """Return the melodic range in semitones (highest minus lowest MIDI pitch).

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        int >= 0; 0 for a piece with no notes — the original raised
        ValueError from max()/min() on an empty sequence.
    """
    note_values = [note.midi for note in util.get_notes(piece)]
    if not note_values:
        # Empty piece has no range.
        return 0
    return max(note_values) - min(note_values)
def repeated_notes(piece):
    """Return the occurrence count of the most frequent MIDI pitch.

    Bug fix: the original used ``stats.mode(note_values)[1][0]``, which
    breaks on SciPy >= 1.11 where ``mode`` returns scalars by default
    (``keepdims=False`` makes ``[1][0]`` fail), and raises on an empty
    piece. ``Counter.most_common`` yields the same count — on ties all
    modal values share that count, so the result matches ``stats.mode``.

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        int count of the modal pitch; 0 for a piece with no notes.
    """
    # Local import: the file's top-of-file import block is outside this view.
    from collections import Counter

    note_values = [note.midi for note in util.get_notes(piece)]
    if not note_values:
        return 0
    return Counter(note_values).most_common(1)[0][1]
def rhythmic_variety(piece):
    """Return 10 * (distinct note durations) / (total notes).

    Durations are measured in quarter lengths; the factor of 10 matches the
    original scaling (presumably to bring the feature into a convenient
    range — confirm against downstream consumers).

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        float; 0.0 for a piece with no notes — the original raised
        ZeroDivisionError in that case.
    """
    notes = util.get_notes(piece)
    if not notes:
        # Empty piece: avoid dividing by zero.
        return 0.0
    distinct_durations = {float(note.duration.quarterLength) for note in notes}
    return 10 * len(distinct_durations) / len(notes)
def rhythmic_variance(piece):
    """Return the population variance of note durations (quarter lengths).

    Bug fix: the original computed the sum of squared deviations with
    ``reduce(lambda acc, x: acc + (x - mean) ** 2, durations)`` WITHOUT an
    initializer, so the first duration seeded the accumulator raw instead
    of contributing its squared deviation — inflating the result for any
    nonzero first duration. ``numpy.var`` (ddof=0) computes the intended
    mean of squared deviations directly.

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        float >= 0; 0.0 for a piece with no notes (the original raised
        TypeError from reduce() on an empty sequence).
    """
    notes = util.get_notes(piece)
    if not notes:
        return 0.0
    durations = [float(note.duration.quarterLength) for note in notes]
    return float(numpy.var(durations))
def num_notes(piece):
    """Return the total number of notes in the piece."""
    notes = util.get_notes(piece)
    return len(notes)
def range_melody(piece):
    """Return the melodic range in semitones: highest minus lowest MIDI pitch.

    Args:
        piece: a music21 score-like object accepted by ``util.get_notes``.

    Returns:
        int >= 0. Returns 0 when the piece has no notes — the original
        raised ValueError from max()/min() of an empty sequence.
    """
    pitches = [note.midi for note in util.get_notes(piece)]
    if not pitches:
        # No notes, no range.
        return 0
    lowest = min(pitches)
    highest = max(pitches)
    return highest - lowest