Example #1
    def execute(self, context):
        def evaluate(co, channel, mx):
            # Gate the sampled value: 1 if it exceeds the threshold
            # (given as a percentage of the channel maximum mx), otherwise 0.
            x = co.x
            y = 1 if co.y > self.threshold / 100 * mx else 0
            return (x, y)
        
        speaker = getSpeaker(context)
        original_action = speaker.animation_data.action
        self.new_channel_name = unique_name(speaker.channels, original_action.get("channel_name", "AA"))
        newaction = copy_sound_action(speaker, self.new_channel_name)

        if newaction is None:
            return {'CANCELLED'}

        ch = newaction["channel_name"]
        # channel names already used by any speaker in the scene
        channels = [c for sp in context.scene.objects if sp.type == 'SPEAKER'
                    for c in sp.data.channels]

        start, end = newaction.frame_range
        speaker.animation_data.action = newaction
        speaker.sound.bakeoptions.channel_name =\
                unique_name(channels, self.new_channel_name)

        # "rna" is a str()-serialized dict of per-channel UI settings stored on
        # the action; rebuild it so the channel ranges can be rewritten below.
        rna = eval(newaction["rna"])

        # Re-bake each channel F-curve as a 0/1 gate with constant interpolation,
        # then convert it back to sample points over the original frame range.
        fcurves = list(newaction.fcurves)  # copy, curves are removed in the loop
        for fcurve in fcurves:
            dp = fcurve.data_path
            channel = fcurve.data_path.strip("\"'[]")
            pts = [x for p in fcurve.sampled_points
                   for x in evaluate(p.co, channel, rna[channel]["b"])]
            rna[channel]["a"] = 0
            rna[channel]["b"] = 1
            newaction["min"] = rna[channel]["min"] = rna[channel]["soft_min"] = 0
            newaction["max"] = rna[channel]["max"] = rna[channel]["soft_max"] = 1
            newaction.fcurves.remove(fcurve)
            new_fcurve = newaction.fcurves.new(dp, action_group=ch)
            new_fcurve.extrapolation = 'CONSTANT'
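            # pts holds flattened (x, y) pairs, so the curve needs len(pts) // 2 keyframes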
            new_fcurve.keyframe_points.add(len(pts) // 2)
            new_fcurve.keyframe_points.foreach_set("co", pts)
            for p in new_fcurve.keyframe_points:
                p.interpolation = 'CONSTANT'
            new_fcurve.convert_to_samples(start, end)

        newaction["rna"] = str(rna)
        speaker['_RNA_UI'].update(rna)

        if self.nla_drop:
            # would need a context override to use properly; not done here
            nla_drop(speaker, newaction, 1, self.new_channel_name)

        # testcode TODO
        self.add_to_tweaks(newaction)
        newaction.normalise = 'NONE'
        #tw.type = "COPIED FROM %s" % original_action.name
        return {'FINISHED'}
Example #3
    def finished(self, context):
        # return to view3d

        if self.view3d is not None:
            self.view3d.type = self._view3d
        print("TOTAL BAKE TIME: %02d:%02d:%02d" %
              utils.splittime(sum(BakeSoundPanel.bake_times)))
        BakeSoundPanel.report = "Finished Baking"
        #context.area.header_text_set()
        # set up the rnas
        sp = self.speaker
        sound = self.sound
        action = sp.animation_data.action
        bakeoptions = sound.bakeoptions
        boo = bakeoptions.bake_operator
        # save non defaults to an ID prop.

        action['boo'] = bakeoptions.sound_type

        action['_RNA_UI']['boo'] = dict(boo.items())

        channel_name = action['channel_name']
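        # custom property names for this bake's channels; their _RNA_UI entries
        # are copied onto the action below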
        vcns = [
            "%s%d" % (channel_name, i) for i in range(bakeoptions.channels)
        ]

        sp_rna = {
            k: sp['_RNA_UI'][k].to_dict()
            for k in sp['_RNA_UI'].keys() if k in vcns
        }

        action['rna'] = str(sp_rna)

        BakeSoundPanel.baking = False
        # drop the action into the NLA
        utils.nla_drop(sp, action, 1, "%s %s" % (channel_name, channel_name))
        # normalise to the action's own range
        action.normalise = 'ACTION'

        if context.scene.speaker is None:
            sp.is_context_speaker = True

        context.window_manager.event_timer_remove(self._timer)
        bpy.ops.graph.view_all_with_bgl_graph()
        self.add_to_tweaks(action)
        return {'FINISHED'}
Example #5
    def execute(self, context):
        speaker = getSpeaker(context)
        original_action = speaker.animation_data.action
        newaction = copy_sound_action(speaker, self.new_channel_name)
        channels = [c for sp in context.scene.objects if sp.type == 'SPEAKER' for c in sp.data.channels]
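        # channel names already used by any speaker in the scene, so the bake
        # options get a unique channel name below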

        if newaction is not None:
            speaker.animation_data.action = newaction
            speaker.sound.bakeoptions.channel_name =\
                    unique_name(channels, "AA")
                    #unique_name(channels, self.new_channel_name)

            if self.nla_drop:
                # would need a context override to use properly; not done here
                nla_drop(speaker, newaction, 1, self.new_channel_name)

            self.add_to_tweaks(newaction)
            #tw.type = "COPIED FROM %s" % original_action.name
            return {'FINISHED'}

        return {'CANCELLED'}
Example #7
def read_midi_file(context, filepath, use_some_setting):
    print(filepath)
    prefs = context.user_preferences.addons["sound_drivers"].preferences
    import sys

    sys.path.append(prefs.smf_dir)
    import smf
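    # smf (Standard MIDI File parser) is loaded from the directory set in the
    # add-on preferences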

    pin_id = context.space_data.pin_id
    sp = pin_id if pin_id is not None else context.object.data

    channels = [c for sp in context.scene.objects if sp.type == "SPEAKER" for c in sp.data.channels]
    midichannels = []
    midifile = {}  # per-track data, keyed by "TRACK : n"
    f = smf.SMF(filepath)
    tracks = []
    for t in f.tracks:
        print("TRACK : %s" % t.track_number)
        a = {}
        # a = bpy.data.actions.new("MidiAction")

        print(channels)
        channel_name = unique_name(channels, "AA")
        channels.append(channel_name)
        a["name"] = "%s Unknown (trk %d)" % (channel_name, t.track_number)
        a["wavfile"] = sp.sound.name
        a["start"] = 0
        a["end"] = len(midi_notes)
        a["row_height"] = 0.4
        a["Channels"] = 0
        a["channel_name"] = channel_name
        a["MIDI"] = filepath
        # sp.animation_data.action = a
        a["keyframe_points"] = {}  # list of tups (dp, frame, value)

        """
        for i, n in enumerate(midi_notes):
            channel = "%s%d" % (channel_name, i)
            sp[channel] = 0
            sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
        """
        for e in t.events:
            s = e.decode()
            print("XXXXXXXX", s, e.time_seconds)
            # print(s.startswith("Note"))

            if s.startswith("Instrument"):
                print("INSTRUMENT", s)
                a["name"] = "%s:%s (trk %d)" % (channel_name, s.title(), t.track_number)
            elif s.startswith("Key Signature"):
                print("KEYSIG", s)
            elif s.startswith("Program Change"):
                # print("PROGCHA", s)
                # this could be instrument too
                idx = int(s.split(" ")[-1])
                # print(midi_instruments[idx])
                a["name"] = "%s:%s (trk %d)" % (channel_name, midi_instruments[idx].title(), t.track_number)
            elif s.startswith("Controller"):
                continue
                print("CONTROLLER", s)
            elif s.startswith("Pitch Wheel"):
                print("PITCHWEEL", s)
                # do nothing for now
                continue
            elif s.startswith("Note"):
                v = 0
                s = s.replace("Note ", "")
                # print("StartsWithNote", s)
                c = s.split(",")
                # print("CC", c[0])
                v = int(c[3].replace(" velocity ", ""))
                if c[0] == "On":
                    note = c[2].replace(" note ", "")
                    print("ON key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                elif c[0] == "Off":
                    v = 0
                    note = c[2].replace(" note ", "")
                    print("OFF key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                if note not in midi_notes:
                    print("WARNING: unable to use note %s %d" % notesplit(note))
                    continue
                channel = "%s%i" % (channel_name, midi_notes.index(note))
                fcurve = a["keyframe_points"].setdefault(channel, [])
                fcurve.append((e.time_seconds * context.scene.render.fps, v))
                # sp[channel] = v
                # a["keyframe_points"].append(('["%s"]' % channel, e.time_seconds * context.scene.render.fps, v))
            else:
                # continue
                print("XXX", s)

        if len(a["keyframe_points"].keys()):
            midichannels.append(channel_name)
            midifile["TRACK : %s" % t.track_number] = a

    print(midichannels)

    keys = ["name", "wavfile", "start", "end", "row_height", "Channels", "channel_name", "MIDI"]
    actions = []
    channels = []
    action = bpy.data.actions.new("MidiAction")
    sp.animation_data.action = action
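    # a single action collects every track; keyframes are written by setting
    # the speaker's custom properties and calling keyframe_insert on them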
    for k, a in midifile.items():
        print("-" * 77)
        print(midifile[k]["name"])

        tracks = a["keyframe_points"]

        # action = bpy.data.actions.new("MidiAction")
        # sp.animation_data.action = action
        for key in keys:
            action[key] = a[key]
        channel = a["channel_name"]
        channels.append(channel)
        for t in tracks.keys():
            kfps = tracks[t]
            print(t)
            # fc = action.fcurves.new('["%s"]' % t)
            # make the fcurves
            sp[t] = 0
            sp.keyframe_insert('["%s"]' % t, frame=1, group=a["name"])
            for kfp in kfps:
                f, v = kfp
                sp[t] = v
                sp.keyframe_insert('["%s"]' % t, frame=f, group=a["name"])
            print("KFPS", t, len(tracks[t]))

    actions.append(action)

    # would normally load the data here

    vals = []
    sp_rna = {}
    for a in actions:

        if len(a.fcurves) <= 1:
            # nothing useful was keyframed; discard the action
            print("NO FCURVES IN ", a.name)
            try:
                del a["wavfile"]
                a.user_clear()
                actions.remove(a)
                # bpy.data.actions.remove(a)
            except Exception:
                print("XCPT")
            continue

        a["Channels"] = len(a.fcurves)
        a["channels"] = channels
        channel_name = a["channel_name"]

        for fc in a.fcurves:

            for kp in fc.keyframe_points:
                kp.interpolation = "CONSTANT"
            fc_range, points = fc.minmax
            cn = fc.data_path.replace('["', "").replace('"]', "")
            print(channel_name, cn)
            n = int(cn[2:])
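            # the channel suffix is treated as a MIDI note number; convert to Hz
            # using equal temperament (A4 = note 69 = 440 Hz)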
            f = pow(2, (n - 69) / 12.0) * 440
            high = low = f
            vals.extend(list(fc_range))
            rna = sp["_RNA_UI"]
            set_channel_idprop_rna(cn, rna, low, high, fc_range, fc_range, is_music=True)

            """
            vcns = ["%s%d" % (channel_name, i) for i in
                    range(len(midi_notes))]

            """
        sp_rna = {k: sp["_RNA_UI"][k].to_dict() for k in sp["_RNA_UI"].keys()}
        # if k in vcns}

        a["rna"] = str(sp_rna)
        a["min"] = min(vals)
        a["max"] = max(vals)

        nla_drop(sp, action, 1, "%s %s" % (channel_name, channel_name))

    return {"FINISHED"}
Example #8
def read_midi_file(context, filepath, use_some_setting):
    print(filepath)
    prefs = context.user_preferences.addons["sound_drivers"].preferences
    import sys
    sys.path.append(prefs.smf_dir)
    import smf
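    # smf (Standard MIDI File parser) is loaded from the directory set in the
    # add-on preferences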
    pin_id = context.space_data.pin_id
    sp = pin_id if pin_id is not None else context.object.data

    channels = [
        c for sp in context.scene.objects if sp.type == 'SPEAKER'
        for c in sp.data.channels
    ]
    midichannels = []
    midifile = {}  # per-track data, keyed by "TRACK : n"
    f = smf.SMF(filepath)
    tracks = []
    for t in f.tracks:
        print("TRACK : %s" % t.track_number)
        a = {}
        #a = bpy.data.actions.new("MidiAction")

        print(channels)
        channel_name = unique_name(channels, 'AA')
        channels.append(channel_name)
        a["name"] = "%s Unknown (trk %d)" % (channel_name, t.track_number)
        a['wavfile'] = sp.sound.name
        a['start'] = 0
        a['end'] = len(midi_notes)
        a['row_height'] = 0.4
        a["Channels"] = 0
        a["channel_name"] = channel_name
        a['MIDI'] = filepath
        #sp.animation_data.action = a
        a["keyframe_points"] = {}  # list of tups (dp, frame, value)
        '''
        for i, n in enumerate(midi_notes):
            channel = "%s%d" % (channel_name, i)
            sp[channel] = 0
            sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
        '''
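        # walk the track events; Note On/Off events become (frame, velocity)
        # pairs keyed by a per-note channel name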
        for e in t.events:
            s = e.decode()
            print("XXXXXXXX", s, e.time_seconds)
            #print(s.startswith("Note"))

            if s.startswith("Instrument"):
                print("INSTRUMENT", s)
                a["name"] = "%s:%s (trk %d)" % (channel_name, s.title(),
                                                t.track_number)
            elif s.startswith("Key Signature"):
                print("KEYSIG", s)
            elif s.startswith("Program Change"):
                #print("PROGCHA", s)
                # this could be instrument too
                idx = int(s.split(" ")[-1])
                #print(midi_instruments[idx])
                a["name"] = "%s:%s (trk %d)" % (channel_name,
                                                midi_instruments[idx].title(),
                                                t.track_number)
            elif s.startswith("Controller"):
                continue
                print("CONTROLLER", s)
            elif s.startswith("Pitch Wheel"):
                print("PITCHWEEL", s)
                # do nothing for now
                continue
            elif s.startswith("Note"):
                v = 0
                s = s.replace("Note ", "")
                #print("StartsWithNote", s)
                c = s.split(",")
                #print("CC", c[0])
                v = int(c[3].replace(" velocity ", ""))
                if c[0] == "On":
                    note = c[2].replace(" note ", "")
                    print("ON key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                elif c[0] == "Off":
                    v = 0
                    note = c[2].replace(" note ", "")
                    print("OFF key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                if note not in midi_notes:
                    print("WARNING: unable to use note %s %d" %
                          notesplit(note))
                    continue
                channel = "%s%i" % (channel_name, midi_notes.index(note))
                fcurve = a["keyframe_points"].setdefault(channel, [])
                fcurve.append((e.time_seconds * context.scene.render.fps, v))
                #sp[channel] = v
                #a["keyframe_points"].append(('["%s"]' % channel, e.time_seconds * context.scene.render.fps, v))
            else:
                #continue
                print("XXX", s)

        if len(a["keyframe_points"].keys()):
            midichannels.append(channel_name)
            midifile["TRACK : %s" % t.track_number] = a

    print(midichannels)

    keys = [
        "name", "wavfile", "start", "end", "row_height", "Channels",
        "channel_name", "MIDI"
    ]
    actions = []
    channels = []
    action = bpy.data.actions.new("MidiAction")
    sp.animation_data.action = action
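    # a single action collects every track; keyframes are written by setting
    # the speaker's custom properties and calling keyframe_insert on them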
    for k, a in midifile.items():
        print("-" * 77)
        print(midifile[k]["name"])

        tracks = a["keyframe_points"]

        #action = bpy.data.actions.new("MidiAction")
        #sp.animation_data.action = action
        for key in keys:
            action[key] = a[key]
        channel = a["channel_name"]
        channels.append(channel)
        for t in tracks.keys():
            kfps = tracks[t]
            print(t)
            #fc = action.fcurves.new('["%s"]' % t)
            # make the fcurves
            sp[t] = 0
            sp.keyframe_insert('["%s"]' % t, frame=1, group=a["name"])
            for kfp in kfps:
                f, v = kfp
                sp[t] = v
                sp.keyframe_insert('["%s"]' % t, frame=f, group=a["name"])
            print("KFPS", t, len(tracks[t]))

    actions.append(action)

    # would normally load the data here

    vals = []
    sp_rna = {}
    for a in actions:

        if len(a.fcurves) <= 1:
            # nothing useful was keyframed; discard the action
            print("NO FCURVES IN ", a.name)
            try:
                del a["wavfile"]
                a.user_clear()
                actions.remove(a)
                #bpy.data.actions.remove(a)
            except Exception:
                print("XCPT")
            continue

        a["Channels"] = len(a.fcurves)
        a["channels"] = channels
        channel_name = a["channel_name"]

        for fc in a.fcurves:

            for kp in fc.keyframe_points:
                kp.interpolation = 'CONSTANT'
            fc_range, points = fc.minmax
            cn = fc.data_path.replace('["', '').replace('"]', '')
            print(channel_name, cn)
            n = int(cn[2:])
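            # the channel suffix is treated as a MIDI note number; convert to Hz
            # using equal temperament (A4 = note 69 = 440 Hz)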
            f = pow(2, (n - 69) / 12.0) * 440
            high = low = f
            vals.extend(list(fc_range))
            rna = sp['_RNA_UI']
            set_channel_idprop_rna(cn,
                                   rna,
                                   low,
                                   high,
                                   fc_range,
                                   fc_range,
                                   is_music=True)
            '''
            vcns = ["%s%d" % (channel_name, i) for i in
                    range(len(midi_notes))]

            '''
        sp_rna = {k: sp['_RNA_UI'][k].to_dict() for k in sp['_RNA_UI'].keys()}
        # if k in vcns}

        a['rna'] = str(sp_rna)
        a['min'] = min(vals)
        a['max'] = max(vals)

        nla_drop(sp, action, 1, "%s %s" % (channel_name, channel_name))

    return {'FINISHED'}