Code Example #1
    def tracktorDeserialize(path, titles=None):
        """
        get a track from the xml format from tracktor (1?)
        """
        tree = ET.parse(path)
        root = tree.getroot()
        tracks = {}
        for entry in root.find("COLLECTION").iter("ENTRY"):
            track = Track()
            track.name = entry.attrib["TITLE"]
            track.path = entry.find(
                "LOCATION").attrib["FILE"][:-4]  # strip the ".mp3" extension
            cues = [
                cue for cue in entry.iter("CUE_V2")
                if cue.attrib["NAME"] != "AutoGrid"
            ]
            track.features["Cues"] = Signal(
                [cue.attrib["NAME"][:7] for cue in cues],
                times=[float(cue.attrib["START"]) / 1000 for cue in cues],
                sparse=True)
            tracks[track.path] = track
        if titles:
            return [tracks.get(t) for t in titles]
        return tracks.values()


# bla = TraktorSerializer.tracktorDeserialize(
#     "/home/mickael/Documents/programming/dj-tracks-switch-points/evaluation/mixed in key/collection.nml")
# print(bla)
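A minimal usage sketch, assuming the method is exposed on a TraktorSerializer class as the commented-out call above suggests; the collection path here is a placeholder:

    # Hypothetical usage; "my_collection.nml" is a placeholder path.
    tracks = TraktorSerializer.tracktorDeserialize("my_collection.nml")
    for track in tracks:
        print(track.name, track.features["Cues"].times)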
Code Example #2
File: jamsSerializer.py Project: jarey/Automix
    def deserializeTrack(path, agreement=0.51, distanceAgreement=0.5, minimalAnnotator=0, minimalConfidence=0):
        """instantiate a Track from the jams encoding. https://github.com/marl/jams/
        
        Args:
        ----
            path (str): path to the .jams file
            agreement (float, optional): minimal ratio of annotators agreeing to keep the point. Defaults to 0.51.
            distanceAgreement (float, optional): distance between annotations to cluster them to the same point. Defaults to 0.5.
            minimalAnnotator (int, optional): minimal number of annotators to keep the annotation. Defaults to 0.
            minimalConfidence (int, optional): minimal confidence to keep the annotation. Defaults to 0.
        
        Returns:
        -------
            Track: a track with annotations in its features
        """
        reference = None
        track = Track()
        with open(path) as file:
            reference = json.load(file)

        # meta
        track.path = path
        track.features["duration"] = reference["file_metadata"]["duration"]
        track.name = reference["file_metadata"]["title"]

        switchsIn = []
        switchsOut = []
        for annotation in reference["annotations"]:
            # meta
            annotator = annotation["annotation_metadata"]["annotator"]["name"]
            # if annotator == "Marco":
            #     continue
            # old format segment_open
            if annotation["namespace"] == "segment_open":
                segments = annotation["data"]
                track.features["boundaries"] = Signal(1, times=[segment["time"] for segment in segments], sparse=True)
                track.features["labels"] = [segment["value"] for segment in segments]
            # tempo
            elif annotation["namespace"] == "tempo":
                track.features["tempo"] = annotation["data"][0]["value"]
            # Current format with confidence, segment, and multiple annotators
            elif annotation["namespace"] == "cue_point":
                segments = annotation["data"]
                switchsIn.append([segment for segment in segments if segment["value"]["label"] == "IN"])
                switchsOut.append([segment for segment in segments if segment["value"]["label"] == "OUT"])
                track.features["switchIn-" + annotator] = Signal(
                    1, times=[segment["time"] for segment in segments if segment["value"]["label"] == "IN"], sparse=True)

        track.features["switchIn"] = JamsSerializer.aggregateAnnotations(switchsIn,
                                                                         agreementThreshold=agreement,
                                                                         distanceAgreement=distanceAgreement,
                                                                         minimalAnnotator=minimalAnnotator)
        # track.features["switchOut"] = JamsSerializer.aggregateAnnotations(switchsOut,
        #                                                                   agreementThreshold=agreement,
        #                                                                   distanceAgreement=distanceAgreement,
        #                                                                   minimalAnnotator=minimalAnnotator)
        return track
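For reference, a minimal sketch of the JSON shape this parser reads, reconstructed from the fields accessed above; all values here are illustrative:

    # Illustrative .jams content; only fields read by deserializeTrack are shown.
    reference = {
        "file_metadata": {"duration": 240.0, "title": "Some Track"},
        "annotations": [{
            "namespace": "cue_point",
            "annotation_metadata": {"annotator": {"name": "annotator1"}},
            "data": [
                {"time": 32.0, "value": {"label": "IN"}},
                {"time": 192.0, "value": {"label": "OUT"}},
            ],
        }],
    }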
Code Example #3
File: jamsSerializer.py Project: jarey/Automix
    def serializeTrack(path, track: Track, features=[{"namespace": "beat", "data_source": "Madmom", 'feature': "beats"}]):
        """
        Serialize a track in jams format
        """
        jam = jams.JAMS()
        jam.file_metadata.duration = track.getDuration()
        for feature in features:
            annotation = jams.Annotation(namespace=feature["namespace"])
            annotation.annotation_metadata = jams.AnnotationMetadata(data_source=feature["data_source"])

            for t in track.getFeature(feature["feature"]):
                annotation.append(time=t, duration=0.0)

            jam.annotations.append(annotation)

        jam.save(path)
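A usage sketch, assuming serializeTrack is a static method of JamsSerializer like the deserializer above, and that the track carries the "beats" feature named in the default argument; the output path is a placeholder:

    # Hypothetical call; "out.jams" is a placeholder path. The default
    # features argument serializes the track's Madmom "beats" feature.
    JamsSerializer.serializeTrack("out.jams", track)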
Code Example #4
File: ruleDriven.py Project: jarey/Automix
def fineTunePlaybackRate(trackA: Track, startOverlap: float, endOverlap: float,
                         trackB: Track):
    """
    Look at the difference between all the beats in both tracks during the overlap. 
    fine tune the playbackrate from track B based on the mean difference between those two
    """
    window = 0.2  # tolerance (in seconds) around the overlap boundaries
    trackABeats = [
        beat for beat in trackA.getDeckTimes(trackA.features["beats"].times)
        if beat > startOverlap - window and beat < endOverlap + window
    ]
    trackBBeats = [
        beat for beat in trackB.getDeckTimes(trackB.features["beats"].times)
        if beat > startOverlap - window and beat < endOverlap + window
    ]
    # newplaybackRate = np.sqrt(np.mean(np.square(np.diff(trackABeats)))) / np.sqrt(np.mean(np.square(np.diff(trackBBeats))))
    newplaybackRate = np.mean(np.diff(trackBBeats)) / np.mean(
        np.diff(trackABeats))
    if not math.isnan(newplaybackRate):
        trackB.playRate *= newplaybackRate

    return newplaybackRate, len(trackBBeats)
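A worked example of the ratio: if track B's beats are 0.50 s apart on average while track A's are 0.48 s apart, the multiplier is 0.50 / 0.48 ≈ 1.042, i.e. track B is sped up by roughly 4% to lock onto A's grid. As a sketch with made-up beat times:

    import numpy as np

    trackABeats = [0.00, 0.48, 0.96, 1.44]  # mean inter-beat interval: 0.48 s
    trackBBeats = [0.00, 0.50, 1.00, 1.50]  # mean inter-beat interval: 0.50 s
    rate = np.mean(np.diff(trackBBeats)) / np.mean(np.diff(trackABeats))
    print(rate)  # ~1.0417: track B plays ~4% faster to match track A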
Code Example #5
File: mainCues.py Project: jarey/Automix
def main():
    # Load the tracks
    parser = argparse.ArgumentParser(description='Estimate the cue-in points')
    parser.add_argument('folder',
                        type=str,
                        help="Path to the input folder containing tracks.")
    args = parser.parse_args()
    tracks = [Track(path=path) for path in config.getFolderFiles(args.folder)]

    # Estimate the cue points
    for t in tracks:
        cues = t.getCueIns()
        print(t, cues.values, cues.times)
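Invocation sketch (the folder path is a placeholder):

    python mainCues.py /path/to/tracks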
Code Example #6
File: ruleDriven.py Project: jarey/Automix
def generateFX(trackA: Track, startA: float, switchA: float, endA: float,
               trackB: Track, startB: float, switchB: float, endB: float):
    """
    Create a transition between two tracks and start and stop segments
    startA, endA, starB, endB, are the track times of the crossfading
    TODO: factorize !
    """
    # Fine tune playback rate
    trackB.position = trackA.getDeckTime(
        switchA
    ) - switchB / trackB.playRate  # B position = switch - distance in B from the switch
    trackA.length = (trackA.getDuration() - trackA.soffs) / trackA.playRate
    trackB.length = (trackB.getDuration() - trackB.soffs) / trackB.playRate
    startOverlap = trackB.getDeckTime(startB)
    endOverlap = trackA.getDeckTime(endA)
    fineTunePlaybackRate(trackA, startOverlap, endOverlap, trackB)

    # Fine tune position (recompute: fineTunePlaybackRate may have changed trackB.playRate)
    trackB.position = trackA.getDeckTime(
        switchA
    ) - switchB / trackB.playRate  # B position = switch - distance in B from the switch
    trackA.length = (trackA.getDuration() - trackA.soffs) / trackA.playRate
    trackB.length = (trackB.getDuration() - trackB.soffs) / trackB.playRate
    startOverlap = trackB.getDeckTime(startB)
    endOverlap = trackA.getDeckTime(endA)
    fineTunePosition(trackA, startOverlap, endOverlap, trackB)

    # Normalize the volume (ReplayGain is the distance to -14 dB)
    trackA.FX["gainPt"].append(
        Point(position=trackA.soffs, amplitude=trackA.getReplayGain() + 11))
    trackA.FX["gainPt"].append(
        Point(position=trackA.getDuration(),
              amplitude=trackA.getReplayGain() + 11))
    trackB.FX["gainPt"].append(
        Point(position=trackB.soffs, amplitude=trackB.getReplayGain() + 11))
    trackB.FX["gainPt"].append(
        Point(position=trackB.getDuration(),
              amplitude=trackB.getReplayGain() + 11))

    # crossfading
    trackA.FX["volPt"].append(Point(position=switchA, amplitude=0, curve=1))
    trackA.FX["volPt"].append(Point(position=endA, amplitude=-100, curve=1))
    trackB.FX["volPt"].append(Point(position=startB, amplitude=-100, curve=-1))
    trackB.FX["volPt"].append(Point(position=switchB, amplitude=0, curve=-1))

    # EQ correction
    # TODO: apply the gain before doing all that
    for i, band in enumerate(["lowPt",
                              "highPt"]):  # ["lowPt", "midPt", "highPt"]
        # correction = frequencyCorrection(
        #     trackA.features["barBandsMSE"][i].getValues(trackA.getTrackTime(startOverlap), startA),
        #     trackB.features["barBandsMSE"][i].getValues(startB, endB),
        #     limit=np.max(trackA.features["barBandsMSE"][i].values))
        correction = -26  # fixed EQ cut in dB; the frequencyCorrection estimate above is disabled
        trackB.FX[band].append(
            Point(position=startB, amplitude=correction, curve=1))
        trackB.FX[band].append(Point(position=switchB, amplitude=0, curve=1))

        # correction = frequencyCorrection(
        #     trackA.features["barBandsMSE"][i].getValues(startA, endA),
        #     trackB.features["barBandsMSE"][i].getValues(endB, trackB.getTrackTime(endOverlap)),
        #     limit=np.max(trackB.features["barBandsMSE"][i].values))
        trackA.FX[band].append(Point(position=switchA, amplitude=0, curve=-1))
        trackA.FX[band].append(
            Point(position=endA, amplitude=correction, curve=-1))

    return trackA, trackB
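Finally, a numeric sketch of the positioning formula used twice above (B's deck position is the deck time of A's switch point minus the rate-scaled distance of B's switch cue from its start), with made-up values:

    # Made-up values illustrating how the two switch points are aligned.
    deckSwitchA = 120.0            # trackA.getDeckTime(switchA), in deck seconds
    switchB, playRate = 40.0, 1.0  # track-time cue in B and B's playback rate
    positionB = deckSwitchA - switchB / playRate
    print(positionB)  # 80.0: starting B at deck time 80 s puts its cue at 120 s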