def __init__(self, threadID, input_shape):
    """Load the Yolo detector and the sweet-arpegiato sample bank.

    Builds ``sound_list`` as (WaveObject, random RGB colour, name) tuples,
    one per .wav in the sample folder, plus a standalone "alone" sample.
    """
    exp.__init__(self, threadID, input_shape)
    self.name = "Sweet Dawn"

    print(" - loading Yolo")
    self.Y = yolo.YOLO()

    print(" - loading sound")
    import simpleaudio as sa
    # NOTE(review): hard-coded absolute Windows path — consider making this
    # configurable; left unchanged because it is runtime behavior.
    sample_folder = "D:\\Documents\\GitHub\\Interactive_Art_v1\\Experience\\Filter_Modules\\sounds\\sweet_arpegiato"
    onlyfiles = [
        f for f in listdir(sample_folder) if isfile(join(sample_folder, f))
    ]
    # One entry per sample: (playable WaveObject, random colour, index-as-name).
    self.sound_list = []
    for i, f in enumerate(onlyfiles):
        print(i, f)
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        self.sound_list.append(
            (sa.WaveObject.from_wave_file(join(sample_folder, f)),
             (r, g, b), str(i)))
    self.alone = sa.WaveObject.from_wave_file(
        "D:\\Documents\\GitHub\\Interactive_Art_v1\\Experience\\Filter_Modules\\sounds\\Ya personne.wav"
    )
    self.playing_sounds = []
    # Every sample starts out silent (indices into sound_list).
    self.silent_sounds = list(range(len(self.sound_list)))
def __init__(self, threadID, input_shape):
    """Yolo detection wired to a MIDI bridge ('midoVCV 2') for VCV Rack."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Sweet VCV"

    print(" - loading Yolo")
    from .Modules.keras_yolo3 import yolo
    # The detector runs from a worker thread, so the default TF graph
    # must be captured globally for yolo to read images.
    global graph
    graph = tf.get_default_graph()
    self.Y = yolo.YOLO()

    print(" - loading Mido module")
    from .Modules.Midi_output_module import MidiOutMod, float_to_midi
    self.midiout = MidiOutMod('midoVCV 2')
    # Channel 1 carries the object count, mapped from the [0, 5] range.
    self.nb_objectstomidi = float_to_midi('nb_objects', [0, 5], 10, 1)
    self.midiout.signals.append(self.nb_objectstomidi)
    self.moduleslist.append(self.midiout)
    #self.midiout.start()

    print(" - visual stuff")
    # cv2.putText parameters
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.fontScale = 1
    self.lineType = 2
    # Output frame buffer, same shape as the input feed.
    self.out = np.zeros(input_shape, dtype=np.uint8)
def __init__(self, threadID, input_shape):
    """Yolo detection + MIDI output to VCV Rack + a looping background video."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Fish VCV"

    print(" - loading Yolo")
    from .Modules.keras_yolo3 import yolo
    # Shared TF graph so the detector can read images from a worker thread.
    global graph
    graph = tf.get_default_graph()
    self.Y = yolo.YOLO()

    print(" - loading Mido module")
    from .Modules.Midi_output_module import MidiOutMod, float_to_midi
    self.midiout = MidiOutMod('midoVCV 2')
    # Channel 1 carries the object count, mapped from the [0, 5] range.
    self.nb_objectstomidi = float_to_midi('nb_objects', [0, 5], 10, 1)
    self.midiout.signals.append(self.nb_objectstomidi)
    self.moduleslist.append(self.midiout)
    self.midiout.start()

    print("- import vid")
    from .Modules.Video_Loop_module import Video_Loop_Mod
    self.vid = Video_Loop_Mod("Experience\\Modules\\Videos\\cubesloop.mp4",
                              input_shape)
    self.moduleslist.append(self.vid)
    self.vid.start()

    print(" - visual stuff")
    # cv2.putText parameters
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.fontScale = 1
    self.lineType = 2
def __init__(self, threadID, input_shape):
    """Sound loops + Yolo detection + multi-object tracking."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Sweet Arpegiato"

    print(" - loading sounds")
    from .Modules.sounds.Module_SimpleAudio import SoundLoops
    # One slot per track; 0 means no sound assigned yet.
    self.sound_to_track = [0] * 7
    # NOTE(review): hard-coded absolute Windows paths; "swet_beat" looks like
    # a typo but presumably matches the on-disk folder name — verify before
    # renaming. Left byte-identical here.
    self.Jukbox = SoundLoops(
        "D:\\Documents\\GitHub\\Interactive_Art_v1\\Experience\\Modules\\sounds\\sweet_arpegiato",
        "D:\\Documents\\GitHub\\Interactive_Art_v1\\Experience\\Modules\\sounds\\swet_beat\\Sweet Arpeges 9-SessionDry Kit.wav"
    )
    self.moduleslist.append(self.Jukbox)
    self.Jukbox.start()

    print(" - loading Yolo")
    from .Modules.keras_yolo3 import yolo
    # Shared TF graph so yolo can read images from a worker thread.
    global graph
    graph = tf.get_default_graph()
    self.Y = yolo.YOLO()

    print(" - setting Tracking module")
    from .Modules.Tracking_module import multi_Tracker_Module, Tracker
    self.multi_Tracker = multi_Tracker_Module(dim=4,
                                              labeled=True,
                                              forgeting_speed=50)
    self.moduleslist.append(self.multi_Tracker)
    self.multi_Tracker.start()

    print(" - visual stuff")
    # cv2.putText parameters
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.fontScale = 1
    self.lineType = 2
def __init__(self, threadID, input_shape):
    """Haar-cascade smile/eye detection with Mona Lisa detail overlays."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Joconde"

    print(" - importing Haar module")
    cascadelist = ['haarcascade_smile.xml', 'haarcascade_eye.xml']
    self.haar = HaarCasMod('Experience\\Modules\\opencvHaarCascade',
                           cascadelist)
    self.moduleslist.append(self.haar)
    self.haar.start()

    print(" - importing images")
    # Detail crops of the painting, presumably pasted over detections.
    self.smile = cv2.imread(
        "Experience\\Modules\\Images\\Joconde\\Mona_Lisa_detail_mouth.jpg")
    self.eye = cv2.imread(
        "Experience\\Modules\\Images\\Joconde\\Mona_Lisa_detail_eye.jpg")
def __init__(self, threadID, input_shape):
    """Minimal experience: starts the base threaded module only."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Basic experience"

    print(" - importing Base module")
    # NOTE(review): TM has no local import here, unlike the sibling that
    # does `from .Modules.Base_module import Threaded_Module as TM` —
    # presumably bound at module level; verify.
    self.module = TM()
    self.moduleslist.append(self.module)
    self.module.start()

    print(" - visual stuff")
    # cv2.putText parameters
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.fontScale = 1
    self.lineType = 2
def __init__(self, threadID, input_shape):
    """Speech-recognition experience: recognized text is drawn on screen."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Text On Screen"

    print(" - Speech recognition stuff")
    from .Modules.Speech_Recognition_module import Speech_Recognition as SR
    self.speechmod = SR(device_index=2, Google=True, showmic=False)
    self.moduleslist.append(self.speechmod)
    self.speechmod.start()

    print(" - text stuff")
    self.text = ""

    print(" - visual stuff")
    # PIL font (not a cv2 constant) so a TTF typewriter face can be used.
    self.font = ImageFont.truetype('Fonts\\typewriter\\TYPEWR__.TTF', 30)
    print(self.font)
    self.fontScale = 1
    self.lineType = 2
    self.textcolor = (0, 0, 0)
def __init__(self, threadID, input_shape):
    """Base threaded module plus a looping background video."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Basic experience"

    print(" - importing Base module")
    from .Modules.Base_module import Threaded_Module as TM
    self.module = TM()
    self.moduleslist.append(self.module)
    self.module.start()

    print("- import vid")
    # Fix: Video_Loop_Mod was used here without a local import, while the
    # sibling experience imports it right before use — add the same import
    # so this method does not depend on an unseen module-level binding.
    from .Modules.Video_Loop_module import Video_Loop_Mod
    self.vid = Video_Loop_Mod("Experience\\Modules\\Videos\\cubesloop.mp4",
                              input_shape)
    self.moduleslist.append(self.vid)
    self.vid.start()

    print(" - visual stuff")
    # cv2.putText parameters
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.fontScale = 1
    self.lineType = 2
def __init__(self, threadID, input_shape):
    """Yolo + tracker + two MIDI signals (object count, size) to VCV Rack."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Sweet VCV"

    print(" - loading Yolo")
    from .Modules.keras_yolo3 import yolo
    # Shared TF graph so yolo can read images from a worker thread.
    global graph
    graph = tf.get_default_graph()
    self.Y = yolo.YOLO()

    print(" - setting Tracking module")
    from .Modules.Tracking_module import multi_Tracker_Module, Tracker
    self.multi_Tracker = multi_Tracker_Module(dim=5, labeled=True)
    # Must be started after the yolo import for some reason.
    self.multi_Tracker.start()

    print(" - loading Mido module")
    from .Modules.Midi_output_module import MidiOutMod, float_to_midi
    self.midiout = MidiOutMod('midoVCV 2')
    # Channel 1: 'nb_objects' signal mapped from the [0, 5] range.
    self.nb_objectstomidi = float_to_midi('nb_objects', [0, 5], 10, 1)
    self.midiout.signals.append(self.nb_objectstomidi)
    # Channel 2: 'size' signal mapped from the [0, 100000] range.
    self.sizetomidi = float_to_midi('size', [0, 100000], 10, 2)
    self.midiout.signals.append(self.sizetomidi)
    self.moduleslist.append(self.midiout)
    self.moduleslist.append(self.multi_Tracker)
    self.midiout.start()

    print(" - visual stuff")
    # cv2.putText parameters
    self.font = cv2.FONT_HERSHEY_SIMPLEX
    self.fontScale = 1
    self.lineType = 2
def __init__(self, threadID, input_shape):
    """Trivial experience: a single uniform frame buffer."""
    exp.__init__(self, threadID, input_shape)
    self.name = "Basic experience"
    print(input_shape)
    # Despite the name, value 100 is mid-gray (float dtype from np.ones),
    # not white (255).
    self.white_screen = np.ones(input_shape) * 100