Example No. 1
    def execute(self, context):
        # rebake action using modifiers
        scene = context.scene
        speaker = getSpeaker(context)
        action = getAction(speaker)
        name = action.name
        print("-" * 72)
        print("Rebake  action %s to sampled points" % name)
        print("-" * 72)
        rna = speaker["_RNA_UI"]
        sp_rna = {}
        pts = [(c, [(sp.co[0], c.evaluate(sp.co[0]))
                    for sp in c.keyframe_points]) for c in action.fcurves
               if c.select or self.bake_all]
        action.normalise = 'NONE'
        action["max"] = -float("inf")
        action["min"] = float("inf")

        start, end = action.frame_range[0], action.frame_range[1]

        for fc, sam in pts:

            #if self.RGB: fcu.color_mode = 'AUTO_RGB'

            for i, p in enumerate(sam):
                frame, v = p
                fc.keyframe_points[i].co.y = v

            fc.keyframe_points.update()

            channel_name = fc.data_path.strip('["]')

            is_music = False
            fc_range, points = fc.minmax
            low = rna[channel_name]['low']
            high = rna[channel_name]['high']
            (_min, _max) = fc_range
            if _min < action["min"]:
                action["min"] = _min
            if _max > action["max"]:
                action["max"] = _max

            set_channel_idprop_rna(channel_name,
                                   rna,
                                   low,
                                   high,
                                   fc_range,
                                   fc_range,
                                   is_music=is_music)
            sp_rna[channel_name] = rna[channel_name].to_dict()
            print("%4s %8s %8s %10.4f %10.4f" %
                  (channel_name, f(low), f(high), fc_range[0], fc_range[1]))
            # ok now bake
            fc.convert_to_samples(start, end)

        self.add_to_tweaks(action)

        return {'FINISHED'}
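
The rebake above evaluates each selected F-Curve at its own keyframe positions, so the stored points pick up whatever the F-Modifiers currently produce, and then converts the curve to sampled points. A minimal sketch of that pattern, reusing only the bpy calls seen in the example (evaluate, keyframe_points.update, convert_to_samples); the helper name is illustrative:

def bake_modifiers_into_points(fcurve, start, end):
    """Overwrite each keyframe with its modifier-affected value, then sample.

    Sketch only: fcurve is assumed to be a bpy.types.FCurve, where
    evaluate(frame) returns the value including active F-Modifiers and
    co[1] holds the raw keyframe value.
    """
    baked = [(kp.co[0], fcurve.evaluate(kp.co[0])) for kp in fcurve.keyframe_points]
    for kp, (_frame, value) in zip(fcurve.keyframe_points, baked):
        kp.co[1] = value  # replace the raw value with the evaluated one
    fcurve.keyframe_points.update()
    fcurve.convert_to_samples(int(start), int(end))
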
Example No. 2
    def execute(self, context):
        # rebake action using modifiers
        scene = context.scene
        speaker = getSpeaker(context)
        action = getAction(speaker)
        name = action.name
        print("-" * 72)
        print("Rebake  action %s to sampled points" % name)
        print("-" * 72)
        rna = speaker["_RNA_UI"]
        sp_rna = {}
        pts = [(c, [(sp.co[0], c.evaluate(sp.co[0])) for sp in c.keyframe_points]) for c in action.fcurves if c.select or self.bake_all]
        action.normalise = 'NONE'
        action["max"] = -float("inf")
        action["min"] = float("inf")

        start, end = action.frame_range[0], action.frame_range[1]

        for fc, sam in pts:
            
            #if self.RGB: fcu.color_mode = 'AUTO_RGB'
            
            for i, p in enumerate(sam):
                frame, v = p
                fc.keyframe_points[i].co.y = v
            
            fc.keyframe_points.update()

            channel_name = fc.data_path.strip('["]')
            
            is_music = False
            fc_range, points = fc.minmax
            low = rna[channel_name]['low']
            high = rna[channel_name]['high']
            (_min, _max) = fc_range
            if _min < action["min"]:
                action["min"] = _min
            if _max > action["max"]:
                action["max"] = _max

            set_channel_idprop_rna(channel_name,
                                   rna,
                                   low,
                                   high,
                                   fc_range,
                                   fc_range,
                                   is_music=is_music)
            sp_rna[channel_name] = rna[channel_name].to_dict()
            print("%4s %8s %8s %10.4f %10.4f" % (channel_name, f(low), f(high), fc_range[0], fc_range[1]))
            # ok now bake
            fc.convert_to_samples(start, end)
        
        self.add_to_tweaks(action)

        return {'FINISHED'}
Example No. 3
    def execute(self, context):
        #unbake action
        speaker = getSpeaker(context)
        action = getAction(speaker)
        name = action.name
        print("-" * 72)
        print("Unbake action %s to keyframe points" % name)
        print("-" * 72)
        rna = speaker["_RNA_UI"]
        
        save_fcurve_select = [0] * len(action.fcurves)
        action.fcurves.foreach_get("select", save_fcurve_select)
        #action["max"] = -float("inf")
        #action["min"] = float("inf")
        channel_prefix = action["channel_name"]
        #keys.normalise = 'NONE'
        fcurves = [fc for fc in action.fcurves if len(fc.sampled_points)]
        sp_rna = speaker.get("_RNA_UI").to_dict()
        
        pts = [(fc, [(sp.co[0], fc.evaluate(sp.co[0])) for sp in fc.sampled_points]) for fc in fcurves if fc.select or self.bake_all]
        
        for fcu, fd in pts:
            dp = fcu.data_path
            i = fcu.array_index
            action.fcurves.remove(fcu)
            fc = action.fcurves.new(dp, index=i, action_group=channel_prefix)
            channel_name = dp.strip('["]')
            #fc.keyframe_points.foreach_set("co", [v for c in fd for v in c])
            for p in fd:
                w = fc.keyframe_points.insert(*p)

            is_music = False
            channel_rna = rna[channel_name]
            fc_range, points = fc.minmax
            low = channel_rna['low']
            high = channel_rna['high']
            (_min, _max) = fc_range
            if _min < action["min"]:
                action["min"] = _min
            if _max > action["max"]:
                action["max"] = _max

            set_channel_idprop_rna(channel_name,
                                   rna,
                                   low,
                                   high,
                                   fc_range,
                                   fc_range,
                                   is_music=is_music)

            sp_rna[channel_name] = channel_rna.to_dict()
            print("%4s %8s %8s %10.4f %10.4f" %\
                      (channel_name,\
                       f(low),\
                       f(high),\
                       fc_range[0],\
                       fc_range[1]))
        
        action['rna'] = str(sp_rna)
        action.normalise = 'NONE'
        action.fcurves.foreach_set("select", save_fcurve_select)
        #replace_speaker_action(speaker, action, keys)
        self.add_to_tweaks(speaker.animation_data.action)
        return {'FINISHED'}
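
The unbake above goes the other way: it captures (frame, value) pairs from the sampled points, removes the baked F-Curve and recreates it with editable keyframes. A minimal sketch of that sequence, again limited to the bpy calls used in the example (sampled_points, fcurves.remove/new, keyframe_points.insert); the function name is illustrative:

def unbake_to_keyframes(action, fcurve, group=""):
    """Replace a sample-baked F-Curve with a keyframed copy of its values.

    Sketch only: action is assumed to be a bpy.types.Action and fcurve one
    of its sample-baked F-Curves.
    """
    points = [(sp.co[0], fcurve.evaluate(sp.co[0])) for sp in fcurve.sampled_points]
    data_path, index = fcurve.data_path, fcurve.array_index
    action.fcurves.remove(fcurve)
    new_fc = action.fcurves.new(data_path, index=index, action_group=group)
    for frame, value in points:
        new_fc.keyframe_points.insert(frame, value)
    return new_fc
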
Example No. 4
    def set_modifiers(type='ENVELOPE'):
        scene = context.scene
        #speaker = getSpeaker(context)
        for f in self.fcurves:
            channel = f.data_path.strip('[""]')
            touched = False
            while len(f.modifiers) < 2:
                # add muted envelope modifiers
                add_normal_envelope(f, type='ENVELOPE')
                touched = True
            for i, m in enumerate(f.modifiers):
                m.mute = True
                if self.normalise == 'NONE':
                    continue
                m.reference_value = 0.0
                m.default_min = self["min"]\
                                if not i else speaker_rna[channel]["min"]
                m.default_max = self["max"]\
                                if not i else speaker_rna[channel]["max"]

            low = speaker_rna[channel]["low"]
            high = speaker_rna[channel]["high"]
            sp_rna = speaker['_RNA_UI']

            map_range = Vector((self['min'], self['max']))
            if self.normalise == 'NONE':
                fc_range = Vector((speaker_rna[channel]['a'],
                                  speaker_rna[channel]['b']))
                '''
                speaker['_RNA_UI'][channel] = speaker_rna[channel]
                speaker['_RNA_UI']['a'] = self['min']
                speaker['_RNA_UI']['b'] = self['max']
                '''
                pass
            else:
                # could use the mods ID prop to get indexes
                if self.normalise == 'ACTION':
                    m = f.modifiers[0]
                    b = Vector(self.normalise_range).magnitude
                    fc_range = Vector((speaker_rna[channel]['a'],
                                      speaker_rna[channel]['b']))
                    a = map_range.magnitude
                    fc_range *= b / a
                    map_range = Vector(self.normalise_range)
                if self.normalise == 'CHANNEL':
                    m = f.modifiers[1]
                    fc_range = map_range = self.normalise_range
                for cp in m.control_points:
                    cp.min = self.normalise_range[0]
                    cp.max = self.normalise_range[1]

                m.mute = False

            set_channel_idprop_rna(channel,
                                   sp_rna,
                                   low,
                                   high,
                                   fc_range,
                                   map_range,
                                   is_music=(self["boo"] == 'MUSIC'))

        # flag the mods are added
        self["mods"] = True
Example No. 5
def read_midi_file(context, filepath, use_some_setting):
    print(filepath)
    prefs = context.user_preferences.addons["sound_drivers"].preferences
    import sys

    sys.path.append(prefs.smf_dir)
    import smf

    pin_id = context.space_data.pin_id
    sp = pin_id if pin_id is not None else context.object.data

    channels = [c for sp in context.scene.objects if sp.type == "SPEAKER" for c in sp.data.channels]
    midichannels = []
    f = smf.SMF(filepath)
    tracks = []
    midifile = {}  # collects per-track data keyed by "TRACK : <n>" (assumed local here)
    for t in f.tracks:
        print("TRACK : %s" % t.track_number)
        a = {}
        # a = bpy.data.actions.new("MidiAction")

        print(channels)
        channel_name = unique_name(channels, "AA")
        channels.append(channel_name)
        a["name"] = "%s Unknown (trk %d)" % (channel_name, t.track_number)
        a["wavfile"] = sp.sound.name
        a["start"] = 0
        a["end"] = len(midi_notes)
        a["row_height"] = 0.4
        a["Channels"] = 0
        a["channel_name"] = channel_name
        a["MIDI"] = filepath
        # sp.animation_data.action = a
        a["keyframe_points"] = {}  # list of tups (dp, frame, value)

        """
        for i, n in enumerate(midi_notes):
            channel = "%s%d" % (channel_name, i)
            sp[channel] = 0
            sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
        """
        for e in t.events:
            s = e.decode()
            print("XXXXXXXX", s, e.time_seconds)
            # print(s.startswith("Note"))

            if s.startswith("Instrument"):
                print("INSTRUMENT", s)
                a["name"] = "%s:%s (trk %d)" % (channel_name, s.title(), t.track_number)
            elif s.startswith("Key Signature"):
                print("KEYSIG", s)
            elif s.startswith("Program Change"):
                # print("PROGCHA", s)
                # this could be instrument too
                idx = int(s.split(" ")[-1])
                # print(midi_instruments[idx])
                a["name"] = "%s:%s (trk %d)" % (channel_name, midi_instruments[idx].title(), t.track_number)
            elif s.startswith("Controller"):
                continue
                print("CONTROLLER", s)
            elif s.startswith("Pitch Wheel"):
                print("PITCHWEEL", s)
                # do nothing for now
                continue
            elif s.startswith("Note"):
                v = 0
                s = s.replace("Note ", "")
                # print("StartsWithNote", s)
                c = s.split(",")
                # print("CC", c[0])
                v = int(c[3].replace(" velocity ", ""))
                if c[0] == "On":
                    note = c[2].replace(" note ", "")
                    print("ON key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                elif c[0] == "Off":
                    v = 0
                    note = c[2].replace(" note ", "")
                    print("OFF key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                if note not in midi_notes:
                    print("WARNING: unable to use note %s %d" % notesplit(note))
                    continue
                channel = "%s%i" % (channel_name, midi_notes.index(note))
                fcurve = a["keyframe_points"].setdefault(channel, [])
                fcurve.append((e.time_seconds * context.scene.render.fps, v))
                # sp[channel] = v
                # a["keyframe_points"].append(('["%s"]' % channel, e.time_seconds * context.scene.render.fps, v))
            else:
                # continue
                print("XXX", s)

        if len(a["keyframe_points"].keys()):
            midichannels.append(channel_name)
            midifile["TRACK : %s" % t.track_number] = a

    print(midichannels)

    keys = ["name", "wavfile", "start", "end", "row_height", "Channels", "channel_name", "MIDI"]
    actions = []
    channels = []
    action = bpy.data.actions.new("MidiAction")
    sp.animation_data.action = action
    for k, a in midifile.items():
        print("-" * 77)
        print(midifile[k]["name"])

        tracks = a["keyframe_points"]

        # action = bpy.data.actions.new("MidiAction")
        # sp.animation_data.action = action
        for key in keys:
            action[key] = a[key]
        channel = a["channel_name"]
        channels.append(channel)
        for t in tracks.keys():
            kfps = tracks[t]
            print(t)
            # fc = action.fcurves.new('["%s"]' % t)
            # make the fcurves
            sp[t] = 0
            sp.keyframe_insert('["%s"]' % t, frame=1, group=a["name"])
            for kfp in kfps:
                f, v = kfp
                sp[t] = v
                sp.keyframe_insert('["%s"]' % t, frame=f, group=a["name"])
            print("KFPS", t, len(tracks[t]))

    actions.append(action)

    # would normally load the data here

    vals = []
    sp_rna = {}
    for a in actions:

        if len(a.fcurves) <= 1:
            print("NO FCURVES IN ", a.name)
            try:
                del (a["wavfile"])
                a.user_clear()
                actions.remove(a)
                # bpy.data.actions.remove(a)
                # print("REMOVED")
            except:
                print("XCPT")
                continue
            print("WTF")
            continue

        a["Channels"] = len(a.fcurves)
        a["channels"] = channels
        channel_name = a["channel_name"]

        for fc in a.fcurves:

            for kp in fc.keyframe_points:
                kp.interpolation = "CONSTANT"
            fc_range, points = fc.minmax
            cn = fc.data_path.replace('["', "").replace('"]', "")
            print(channel_name, cn)
            n = int(cn[2:])
            f = pow(2, (n - 69) / 12.0) * 440
            high = low = f
            vals.extend(list(fc_range))
            rna = sp["_RNA_UI"]
            set_channel_idprop_rna(cn, rna, low, high, fc_range, fc_range, is_music=True)

            """
            vcns = ["%s%d" % (channel_name, i) for i in
                    range(len(midi_notes))]

            """
        sp_rna = {k: sp["_RNA_UI"][k].to_dict() for k in sp["_RNA_UI"].keys()}
        # if k in vcns}

        a["rna"] = str(sp_rna)
        a["min"] = min(vals)
        a["max"] = max(vals)

        nla_drop(sp, action, 1, "%s %s" % (channel_name, channel_name))

    return {"FINISHED"}
Example No. 6
    def modal(self, context, event):
        if getattr(context, "area", None):
            context.area.tag_redraw()
        wm = context.window_manager

        '''
        if BakeSoundPanel.wait > 0:
            debug.print("waiting", BakeSoundPanel.wait)
        '''

        def confirm_cancel(self, context):
            layout = self.layout
            layout.operator("sounddrivers.cancel_baking")
            layout.operator("sounddrivers.continue_baking")
        
        if BakeSoundPanel.cancel_baking:
            self.clean()
            return self.cancel(context)

        BakeSoundPanel.baking = True

        bakeoptions = self.sound.bakeoptions
        channels = bakeoptions.channels
        bake_operator = bakeoptions.bake_operator
        sound = self.sound
        speaker = self.speaker
        action = speaker.animation_data.action
        
        if event.type == 'ESC' or not BakeSoundPanel.baking:
            context.window_manager.popup_menu(confirm_cancel, title="Baking", icon='SOUND')
            BakeSoundPanel.wait = 1000000
            # cancellation is picked up by the BakeSoundPanel.cancel_baking
            # check at the top of modal()
            return {'PASS_THROUGH'}

        if BakeSoundPanel.wait > 0:
            BakeSoundPanel.wait -= 1
            return {'PASS_THROUGH'}

        if self.count >= bakeoptions.channels:
            # Success do PostPro
            # return {'PASS_THROUGH'}
            return self.finished(context)

        if self.baking:
            return {'PASS_THROUGH'}

        if event.type == 'TIMER':
            if self.baking:
                return {'PASS_THROUGH'}
            #context.scene.frame_set(1)
            self.baking = True
            fc = action.fcurves[self.bakeorder[self.count]]

            channel = self.bakeorder[self.count]
            wm["bake_progress"] = 100 * self.count / channels
            setattr(BakeSoundPanel, "channel", channel)
            BakeSoundPanel.report = "[%s%d]" % (bakeoptions.channel_name,
                                                      channel)

            fc.select = True
            #FIXME FIXME FIXME
            fp = bpy.path.abspath(sound.filepath)
            low, high = self.channel_range()
            if not self.context_override or not self.graph:
                context.area.type = 'GRAPH_EDITOR'
                context.area.spaces.active.mode = 'FCURVES'
                self.c = context.copy()

            context.scene.frame_set(1)
            #context.area.type = 'GRAPH_EDITOR'

            t0 = time.clock()
            try:
                #x = bpy.ops.graph.sound_bake(

                x = bpy.ops.graph.sound_bake(self.c,
                             filepath=fp,
                             low=low,
                             high=high,
                             attack=bake_operator.attack,
                             release=bake_operator.release,
                             threshold=bake_operator.threshold,
                             use_accumulate=bake_operator.use_accumulate,
                             use_additive=bake_operator.use_additive,
                             use_square=bake_operator.use_square,
                             sthreshold=bake_operator.sthreshold)
            except:
                print("ERROR IN BAKE OP")
                '''
                for k in self.c.keys():
                    print(k, ":", self.c[k])

                '''
                return self.cancel(context)

            '''
            if self.graph:
                #bpy.ops.graph.view_all(self.c)
                bpy.ops.graph.view_all_with_bgl_graph()
            '''

            context.area.type = 'PROPERTIES'
            t1 = time.clock()
            BakeSoundPanel.bake_times.append(t1 - t0)

            fc_range, points = fc.minmax
            vol_range = abs(fc_range[1] - fc_range[0])
            # FIXME make retry count an addon var.
            if self.retries.count(channel) > channels // 5:
                print("TOO MANY RETRIES")
                self.clean()
                return self.cancel(context)
            if bakeoptions.auto_adjust\
                and (vol_range < 0.0001 or vol_range > 1e10):
                print("NO RANGE", vol_range)
                self.retries.append(channel)
                BakeSoundPanel.status[channel] = 99
                if channel == 0:
                    BakeSoundPanel.report = "[%s%d] NO Lo RANGE.. adjusting" \
                    % (bakeoptions.channel_name, channel)
                    bakeoptions.minf = high
                elif channel == (bakeoptions.channels - 1):
                    BakeSoundPanel.report = "[%s%d] NO Hi RANGE .. adjusting" \
                                       % (bakeoptions.channel_name, channel)
                    self.change_last = True
                    bakeoptions.maxf = low
                else:
                    BakeSoundPanel.wait = 20  # wait 2 seconds to continue
                    BakeSoundPanel.report = "[%s%d] NO Mid RANGE\
                            .. continuing" % (bakeoptions.channel_name,\
                                                      channel)
                    self.count += 1
                    bpy.ops.graph.view_all_with_bgl_graph()
                #need to set count down one
            else:
                BakeSoundPanel.status[channel] = 1
                # set up the rna
                rna = speaker["_RNA_UI"]
                channel_name = "%s%d" % (bakeoptions.channel_name, channel)

                is_music = bakeoptions.sound_type == 'MUSIC'
                utils.set_channel_idprop_rna(channel_name,
                                       rna,
                                       low,
                                       high,
                                       fc_range,
                                       fc_range,
                                       is_music=is_music)

                print("%4s %8s %8s %10.4f %10.4f" %\
                          (channel_name,\
                           utils.f(low),\
                           utils.f(high),\
                           fc_range[0],\
                           fc_range[1]),\
                           end="")
                print(" %02d:%02d:%02d" % (utils.splittime(t1 - t0)))
                BakeSoundPanel.report = rna[channel_name]["description"]\
                        .replace("Frequency", "")
                if channel == (bakeoptions.channels - 1)\
                        and self.change_last:
                    self.change_last = False
                    action.fcurves[0].mute = True
                    self.bakeorder[0], self.bakeorder[channels - 1] = \
                            self.bakeorder[channels - 1], self.bakeorder[0]
                    # need to swap n clear first fcurve
                    # mute the first fcurve
                _min, _max = fc_range
                if _min < action["min"]:
                    action["min"] = _min
                if _max > action["max"]:
                    action["max"] = _max
                self.count += 1

            fc.mute = not bool(BakeSoundPanel.status[channel])
            fc.select = False
            self.baking = False
            self.baked = True

        return {'PASS_THROUGH'}
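
The modal loop above retries a channel whose baked F-Curve shows no usable range and cancels the whole bake once any channel exceeds a retry budget of channels // 5. A stand-alone sketch of that guard (parameter names are illustrative):

def should_abort(retries, channel, total_channels, divisor=5):
    """True once channel has been retried more than total_channels // divisor times.

    retries is the list of channel indices that produced no usable range,
    mirroring self.retries in the modal operator above.
    """
    return retries.count(channel) > total_channels // divisor

# with 16 channels the per-channel budget is 16 // 5 == 3 retries:
# should_abort([4, 4, 4], 4, 16)    -> False
# should_abort([4, 4, 4, 4], 4, 16) -> True
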
Example No. 7
    def set_modifiers(type='ENVELOPE'):
        scene = context.scene
        #speaker = getSpeaker(context)
        for f in self.fcurves:
            channel = f.data_path.strip('[""]')
            touched = False
            while len(f.modifiers) < 2:
                # add muted envelope modifiers
                add_normal_envelope(f, type='ENVELOPE')
                touched = True
            for i, m in enumerate(f.modifiers):
                m.mute = True
                if self.normalise == 'NONE':
                    continue
                m.reference_value = 0.0
                m.default_min = self["min"]\
                                if i == 0 else speaker_rna[channel]["min"]
                m.default_max = self["max"]\
                                if i == 0 else speaker_rna[channel]["max"]

            low = speaker_rna[channel]["low"]
            high = speaker_rna[channel]["high"]
            sp_rna = speaker['_RNA_UI']

            map_range = Vector((self['min'], self['max']))
            if self.normalise == 'NONE':
                fc_range = Vector((speaker_rna[channel]['a'],
                                  speaker_rna[channel]['b']))
                '''
                speaker['_RNA_UI'][channel] = speaker_rna[channel]
                speaker['_RNA_UI']['a'] = self['min']
                speaker['_RNA_UI']['b'] = self['max']
                '''
                pass
            else:
                # could use the mods ID prop to get indexes
                if self.normalise == 'ACTION':
                    m = f.modifiers[0]
                    b = Vector(self.normalise_range).magnitude
                    fc_range = Vector((speaker_rna[channel]['a'],
                                      speaker_rna[channel]['b']))
                    a = map_range.magnitude
                    fc_range *= b / a
                    map_range = Vector(self.normalise_range)
                if self.normalise == 'CHANNEL':
                    m = f.modifiers[1]
                    fc_range = map_range = self.normalise_range
                for cp in m.control_points:
                    cp.min = self.normalise_range[0]
                    cp.max = self.normalise_range[1]

                m.mute = False

            set_channel_idprop_rna(channel,
                                   sp_rna,
                                   low,
                                   high,
                                   fc_range,
                                   map_range,
                                   is_music=(self["boo"] == 'MUSIC'))

        # flag the mods are added
        self["mods"] = True
Example No. 8
    def execute(self, context):
        #unbake action
        speaker = getSpeaker(context)
        action = getAction(speaker)
        name = action.name
        print("-" * 72)
        print("Unbake action %s to keyframe points" % name)
        print("-" * 72)
        rna = speaker["_RNA_UI"]
        
        save_fcurve_select = [0] * len(action.fcurves)
        action.fcurves.foreach_get("select", save_fcurve_select)
        #action["max"] = -float("inf")
        #action["min"] = float("inf")
        channel_prefix = action["channel_name"]
        #keys.normalise = 'NONE'
        fcurves = [fc for fc in action.fcurves if len(fc.sampled_points)]
        sp_rna = speaker.get("_RNA_UI").to_dict()
        
        pts = [(fc, [(sp.co[0], fc.evaluate(sp.co[0])) for sp in fc.sampled_points]) for fc in fcurves if fc.select or self.bake_all]
        
        for fcu, fd in pts:
            dp = fcu.data_path
            i = fcu.array_index
            action.fcurves.remove(fcu)
            fc = action.fcurves.new(dp, index=i, action_group=channel_prefix)
            channel_name = dp.strip('["]')
            #fc.keyframe_points.foreach_set("co", [v for c in fd for v in c])
            for p in fd:
                w = fc.keyframe_points.insert(*p)

            is_music = False
            channel_rna = rna[channel_name]
            fc_range, points = fc.minmax
            low = channel_rna['low']
            high = channel_rna['high']
            (_min, _max) = fc_range
            if _min < action["min"]:
                action["min"] = _min
            if _max > action["max"]:
                action["max"] = _max

            set_channel_idprop_rna(channel_name,
                                   rna,
                                   low,
                                   high,
                                   fc_range,
                                   fc_range,
                                   is_music=is_music)

            sp_rna[channel_name] = channel_rna.to_dict()
            print("%4s %8s %8s %10.4f %10.4f" %\
                      (channel_name,\
                       f(low),\
                       f(high),\
                       fc_range[0],\
                       fc_range[1]))
        
        action['rna'] = str(sp_rna)
        action.normalise = 'NONE'
        action.fcurves.foreach_set("select", save_fcurve_select)
        #replace_speaker_action(speaker, action, keys)
        self.add_to_tweaks(speaker.animation_data.action)
        return {'FINISHED'}
Example No. 9
def read_midi_file(context, filepath, use_some_setting):
    print(filepath)
    prefs = context.user_preferences.addons["sound_drivers"].preferences
    import sys
    sys.path.append(prefs.smf_dir)
    import smf
    pin_id = context.space_data.pin_id
    sp = pin_id if pin_id is not None else context.object.data

    channels = [
        c for sp in context.scene.objects if sp.type == 'SPEAKER'
        for c in sp.data.channels
    ]
    midichannels = []
    f = smf.SMF(filepath)
    tracks = []
    midifile = {}  # collects per-track data keyed by "TRACK : <n>" (assumed local here)
    for t in f.tracks:
        print("TRACK : %s" % t.track_number)
        a = {}
        #a = bpy.data.actions.new("MidiAction")

        print(channels)
        channel_name = unique_name(channels, 'AA')
        channels.append(channel_name)
        a["name"] = "%s Unknown (trk %d)" % (channel_name, t.track_number)
        a['wavfile'] = sp.sound.name
        a['start'] = 0
        a['end'] = len(midi_notes)
        a['row_height'] = 0.4
        a["Channels"] = 0
        a["channel_name"] = channel_name
        a['MIDI'] = filepath
        #sp.animation_data.action = a
        a["keyframe_points"] = {}  # list of tups (dp, frame, value)
        '''
        for i, n in enumerate(midi_notes):
            channel = "%s%d" % (channel_name, i)
            sp[channel] = 0
            sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
            #sp.keyframe_insert('["%s"]' % channel, frame = 0)
        '''
        for e in t.events:
            s = e.decode()
            print("XXXXXXXX", s, e.time_seconds)
            #print(s.startswith("Note"))

            if s.startswith("Instrument"):
                print("INSTRUMENT", s)
                a["name"] = "%s:%s (trk %d)" % (channel_name, s.title(),
                                                t.track_number)
            elif s.startswith("Key Signature"):
                print("KEYSIG", s)
            elif s.startswith("Program Change"):
                #print("PROGCHA", s)
                # this could be instrument too
                idx = int(s.split(" ")[-1])
                #print(midi_instruments[idx])
                a["name"] = "%s:%s (trk %d)" % (channel_name,
                                                midi_instruments[idx].title(),
                                                t.track_number)
            elif s.startswith("Controller"):
                continue
                print("CONTROLLER", s)
            elif s.startswith("Pitch Wheel"):
                print("PITCHWEEL", s)
                # do nothing for now
                continue
            elif s.startswith("Note"):
                v = 0
                s = s.replace("Note ", "")
                #print("StartsWithNote", s)
                c = s.split(",")
                #print("CC", c[0])
                v = int(c[3].replace(" velocity ", ""))
                if c[0] == "On":
                    note = c[2].replace(" note ", "")
                    print("ON key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                elif c[0] == "Off":
                    v = 0
                    note = c[2].replace(" note ", "")
                    print("OFF key[%s] = %d @ %fs" % (note, v, e.time_seconds))
                if note not in midi_notes:
                    print("WARNING: unable to use note %s %d" %
                          notesplit(note))
                    continue
                channel = "%s%i" % (channel_name, midi_notes.index(note))
                fcurve = a["keyframe_points"].setdefault(channel, [])
                fcurve.append((e.time_seconds * context.scene.render.fps, v))
                #sp[channel] = v
                #a["keyframe_points"].append(('["%s"]' % channel, e.time_seconds * context.scene.render.fps, v))
            else:
                #continue
                print("XXX", s)

        if len(a["keyframe_points"].keys()):
            midichannels.append(channel_name)
            midifile["TRACK : %s" % t.track_number] = a

    print(midichannels)

    keys = [
        "name", "wavfile", "start", "end", "row_height", "Channels",
        "channel_name", "MIDI"
    ]
    actions = []
    channels = []
    action = bpy.data.actions.new("MidiAction")
    sp.animation_data.action = action
    for k, a in midifile.items():
        print("-" * 77)
        print(midifile[k]["name"])

        tracks = a["keyframe_points"]

        #action = bpy.data.actions.new("MidiAction")
        #sp.animation_data.action = action
        for key in keys:
            action[key] = a[key]
        channel = a["channel_name"]
        channels.append(channel)
        for t in tracks.keys():
            kfps = tracks[t]
            print(t)
            #fc = action.fcurves.new('["%s"]' % t)
            # make the fcurves
            sp[t] = 0
            sp.keyframe_insert('["%s"]' % t, frame=1, group=a["name"])
            for kfp in kfps:
                f, v = kfp
                sp[t] = v
                sp.keyframe_insert('["%s"]' % t, frame=f, group=a["name"])
            print("KFPS", t, len(tracks[t]))

    actions.append(action)

    # would normally load the data here

    vals = []
    sp_rna = {}
    for a in actions:

        if len(a.fcurves) <= 1:
            print("NO FCURVES IN ", a.name)
            try:
                del (a["wavfile"])
                a.user_clear()
                actions.remove(a)
                #bpy.data.actions.remove(a)
                #print("REMOVED")
            except:
                print("XCPT")
                continue
            print("WTF")
            continue

        a["Channels"] = len(a.fcurves)
        a["channels"] = channels
        channel_name = a["channel_name"]

        for fc in a.fcurves:

            for kp in fc.keyframe_points:
                kp.interpolation = 'CONSTANT'
            fc_range, points = fc.minmax
            cn = fc.data_path.replace('["', '').replace('"]', '')
            print(channel_name, cn)
            n = int(cn[2:])
            f = pow(2, (n - 69) / 12.0) * 440
            high = low = f
            vals.extend(list(fc_range))
            rna = sp['_RNA_UI']
            set_channel_idprop_rna(cn,
                                   rna,
                                   low,
                                   high,
                                   fc_range,
                                   fc_range,
                                   is_music=True)
            '''
            vcns = ["%s%d" % (channel_name, i) for i in
                    range(len(midi_notes))]

            '''
        sp_rna = {k: sp['_RNA_UI'][k].to_dict() for k in sp['_RNA_UI'].keys()}
        # if k in vcns}

        a['rna'] = str(sp_rna)
        a['min'] = min(vals)
        a['max'] = max(vals)

        nla_drop(sp, action, 1, "%s %s" % (channel_name, channel_name))

    return {'FINISHED'}
Example No. 10
    def modal(self, context, event):
        if getattr(context, "area", None):
            context.area.tag_redraw()
        wm = context.window_manager
        '''
        if BakeSoundPanel.wait > 0:
            debug.print("waiting", BakeSoundPanel.wait)
        '''
        def confirm_cancel(self, context):
            layout = self.layout
            layout.operator("sounddrivers.cancel_baking")
            layout.operator("sounddrivers.continue_baking")

        if BakeSoundPanel.cancel_baking:
            self.clean()
            return self.cancel(context)

        BakeSoundPanel.baking = True

        bakeoptions = self.sound.bakeoptions
        channels = bakeoptions.channels
        bake_operator = bakeoptions.bake_operator
        sound = self.sound
        speaker = self.speaker
        action = speaker.animation_data.action

        if event.type == 'ESC' or not BakeSoundPanel.baking:
            context.window_manager.popup_menu(confirm_cancel,
                                              title="Baking",
                                              icon='SOUND')
            BakeSoundPanel.wait = 1000000
            # cancellation is picked up by the BakeSoundPanel.cancel_baking
            # check at the top of modal()
            return {'PASS_THROUGH'}

        if BakeSoundPanel.wait > 0:
            BakeSoundPanel.wait -= 1
            return {'PASS_THROUGH'}

        if self.count >= bakeoptions.channels:
            # Success do PostPro
            # return {'PASS_THROUGH'}
            return self.finished(context)

        if self.baking:
            return {'PASS_THROUGH'}

        if event.type == 'TIMER':
            if self.baking:
                return {'PASS_THROUGH'}
            #context.scene.frame_set(1)
            self.baking = True
            fc = action.fcurves[self.bakeorder[self.count]]

            channel = self.bakeorder[self.count]
            wm["bake_progress"] = 100 * self.count / channels
            setattr(BakeSoundPanel, "channel", channel)
            BakeSoundPanel.report = "[%s%d]" % (bakeoptions.channel_name,
                                                channel)

            fc.select = True
            #FIXME FIXME FIXME
            fp = bpy.path.abspath(sound.filepath)
            low, high = self.channel_range()
            if not self.context_override or not self.graph:
                context.area.type = 'GRAPH_EDITOR'
                context.area.spaces.active.mode = 'FCURVES'
                self.c = context.copy()

            context.scene.frame_set(1)
            #context.area.type = 'GRAPH_EDITOR'

            t0 = time.clock()
            try:
                #x = bpy.ops.graph.sound_bake(

                x = bpy.ops.graph.sound_bake(
                    self.c,
                    filepath=fp,
                    low=low,
                    high=high,
                    attack=bake_operator.attack,
                    release=bake_operator.release,
                    threshold=bake_operator.threshold,
                    use_accumulate=bake_operator.use_accumulate,
                    use_additive=bake_operator.use_additive,
                    use_square=bake_operator.use_square,
                    sthreshold=bake_operator.sthreshold)
            except:
                print("ERROR IN BAKE OP")
                '''
                for k in self.c.keys():
                    print(k, ":", self.c[k])

                '''
                return self.cancel(context)
            '''
            if self.graph:
                #bpy.ops.graph.view_all(self.c)
                bpy.ops.graph.view_all_with_bgl_graph()
            '''

            context.area.type = 'PROPERTIES'
            t1 = time.clock()
            BakeSoundPanel.bake_times.append(t1 - t0)

            fc_range, points = fc.minmax
            vol_range = abs(fc_range[1] - fc_range[0])
            # FIXME make retry count an addon var.
            if self.retries.count(channel) > channels // 5:
                print("TOO MANY RETRIES")
                self.clean()
                return self.cancel(context)
            if bakeoptions.auto_adjust\
                and (vol_range < 0.0001 or vol_range > 1e10):
                print("NO RANGE", vol_range)
                self.retries.append(channel)
                BakeSoundPanel.status[channel] = 99
                if channel == 0:
                    BakeSoundPanel.report = "[%s%d] NO Lo RANGE.. adjusting" \
                    % (bakeoptions.channel_name, channel)
                    bakeoptions.minf = high
                elif channel == (bakeoptions.channels - 1):
                    BakeSoundPanel.report = "[%s%d] NO Hi RANGE .. adjusting" \
                                       % (bakeoptions.channel_name, channel)
                    self.change_last = True
                    bakeoptions.maxf = low
                else:
                    BakeSoundPanel.wait = 20  # wait 2 seconds to continue
                    BakeSoundPanel.report = "[%s%d] NO Mid RANGE\
                            .. continuing"                                           % (bakeoptions.channel_name,\
                                                      channel)
                    self.count += 1
                    bpy.ops.graph.view_all_with_bgl_graph()
                #need to set count down one
            else:
                BakeSoundPanel.status[channel] = 1
                # set up the rna
                rna = speaker["_RNA_UI"]
                channel_name = "%s%d" % (bakeoptions.channel_name, channel)

                is_music = bakeoptions.sound_type == 'MUSIC'
                utils.set_channel_idprop_rna(channel_name,
                                             rna,
                                             low,
                                             high,
                                             fc_range,
                                             fc_range,
                                             is_music=is_music)

                print("%4s %8s %8s %10.4f %10.4f" %\
                          (channel_name,\
                           utils.f(low),\
                           utils.f(high),\
                           fc_range[0],\
                           fc_range[1]),\
                           end="")
                print(" %02d:%02d:%02d" % (utils.splittime(t1 - t0)))
                BakeSoundPanel.report = rna[channel_name]["description"]\
                        .replace("Frequency", "")
                if channel == (bakeoptions.channels - 1)\
                        and self.change_last:
                    self.change_last = False
                    action.fcurves[0].mute = True
                    self.bakeorder[0], self.bakeorder[channels - 1] = \
                            self.bakeorder[channels - 1], self.bakeorder[0]
                    # need to swap n clear first fcurve
                    # mute the first fcurve
                _min, _max = fc_range
                if _min < action["min"]:
                    action["min"] = _min
                if _max > action["max"]:
                    action["max"] = _max
                self.count += 1

            fc.mute = not bool(BakeSoundPanel.status[channel])
            fc.select = False
            self.baking = False
            self.baked = True

        return {'PASS_THROUGH'}