def tracktorDeserialize(path, titles=None):
    """Get tracks from the Traktor (1?) XML collection format (.nml)."""
    tree = ET.parse(path)
    root = tree.getroot()
    tracks = {}
    for entry in root.find("COLLECTION").iter("ENTRY"):
        track = Track()
        track.name = entry.attrib["TITLE"]
        track.path = entry.find("LOCATION").attrib["FILE"][:-4]  # Removing .mp3
        # Keep every cue except Traktor's automatic "AutoGrid" marker
        cues = [
            cue for cue in entry.iter("CUE_V2")
            if cue.attrib["NAME"] != "AutoGrid"
        ]
        track.features["Cues"] = Signal(
            [cue.attrib["NAME"][:7] for cue in cues],
            times=[float(cue.attrib["START"]) / 1000 for cue in cues],  # ms -> s
            sparse=True)
        tracks[track.path] = track

    if titles:
        return [tracks[t] if t in tracks else None for t in titles]
    return tracks.values()


# bla = TraktorSerializer.tracktorDeserialize(
#     "/home/mickael/Documents/programming/dj-tracks-switch-points/evaluation/mixed in key/collection.nml")
# print(bla)
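# A minimal usage sketch (kept commented out, like the example above): the .nml
# path is hypothetical, and the function is assumed to be exposed on the
# TraktorSerializer class, with `ET` being xml.etree.ElementTree and Track/Signal
# coming from this project.
# tracks = TraktorSerializer.tracktorDeserialize("collection.nml")
# for track in tracks:
#     print(track.name, track.features["Cues"])
# Passing `titles` returns matching entries in that order, with None where no track is found:
# ordered = TraktorSerializer.tracktorDeserialize("collection.nml", titles=["path/a", "path/b"])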
def deserializeTrack(path, agreement=0.51, distanceAgreement=0.5, minimalAnnotator=0, minimalConfidence=0):
    """Instantiate a Track from the jams encoding. https://github.com/marl/jams/

    Args:
    ----
        path (str): path to the .jams file
        agreement (float, optional): minimal ratio of annotators agreeing to keep a point. Defaults to 0.51.
        distanceAgreement (float, optional): maximal distance between annotations for them to be clustered to the same point. Defaults to 0.5.
        minimalAnnotator (int, optional): minimal number of annotators to keep the annotation. Defaults to 0.
        minimalConfidence (int, optional): minimal confidence to keep the annotation. Defaults to 0.

    Returns:
    -------
        Track: a track with the annotations in its features
    """
    track = Track()
    with open(path) as file:
        reference = json.load(file)

    # meta
    track.path = path
    track.features["duration"] = reference["file_metadata"]["duration"]
    track.name = reference["file_metadata"]["title"]

    switchsIn = []
    switchsOut = []
    for annotation in reference["annotations"]:
        # meta
        annotator = annotation["annotation_metadata"]["annotator"]["name"]
        # if annotator == "Marco":
        #     continue

        # old format segment_open
        if annotation["namespace"] == "segment_open":
            segments = annotation["data"]
            track.features["boundaries"] = Signal(
                1, times=[segment["time"] for segment in segments], sparse=True)
            track.features["labels"] = [segment["value"] for segment in segments]

        # tempo
        elif annotation["namespace"] == "tempo":
            track.features["tempo"] = annotation["data"][0]["value"]

        # current format with confidence, segment, and multiple annotators
        elif annotation["namespace"] == "cue_point":
            segments = annotation["data"]
            switchsIn.append([segment for segment in segments if segment["value"]["label"] == "IN"])
            switchsOut.append([segment for segment in segments if segment["value"]["label"] == "OUT"])
            track.features["switchIn-" + annotator] = Signal(
                1,
                times=[segment["time"] for segment in segments if segment["value"]["label"] == "IN"],
                sparse=True)

    # Aggregate the per-annotator switch-in points into a single annotation
    track.features["switchIn"] = JamsSerializer.aggregateAnnotations(
        switchsIn,
        agreementThreshold=agreement,
        distanceAgreement=distanceAgreement,
        minimalAnnotator=minimalAnnotator)
    # track.features["switchOut"] = JamsSerializer.aggregateAnnotations(
    #     switchsOut,
    #     agreementThreshold=agreement,
    #     distanceAgreement=distanceAgreement,
    #     minimalAnnotator=minimalAnnotator)
    return track
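# A minimal usage sketch (commented out, same convention as above): the .jams
# path is hypothetical, and the function is assumed to live on the JamsSerializer
# class it references, with `json`, Track and Signal available at module level.
# track = JamsSerializer.deserializeTrack("annotations/mix-01.jams",
#                                         agreement=0.51, distanceAgreement=0.5)
# print(track.name, track.features["duration"])
# print(track.features["switchIn"])  # aggregated switch-in cue points
# # per-annotator points are kept under the "switchIn-<annotator name>" keys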