def init(avatars, toRemove, ambientBusAudioIn, scene):
    """Set up the avatar-stories phase: stash caller state in module
    globals, preload the narration clips, build the gaze selector, and
    start per-participant data collection."""
    # Cache caller-provided objects for the other functions in this module.
    global avs, remove, ambientBusAudio
    avs = avatars
    remove = toRemove
    ambientBusAudio = ambientBusAudioIn
    global SCENE
    SCENE = scene
    global busCloseAudio1, busCloseAudio2
    global introAudio
    # Narration clips; loaded here, played later by the scene logic.
    introAudio = viz.addAudio('Audio/AvatarStories_Directions.wav')
    busCloseAudio1 = viz.addAudio('Audio/Bus Scene/BusClosing_pt1.wav')
    busCloseAudio2 = viz.addAudio('Audio/Bus Scene/BusClosing_pt2.wav')
    global selector
    # Gaze selector with both crosshair and audio feedback enabled.
    selector = NodeSelectionWithFeedbackOptions.Selector(SCENE, True, True)
    selector.disable()  # activated later, once selection should be live
    selector.selectedAudio.volume(.1)
    global dc
    # CSV logging of the main viewpoint for this participant.
    dc = DataCollector(filePrefix='Data/AvatarStoriesScene_Participant', fileSuffix='.csv')
    dc.addMainviewTracker()
    dc.startCollecting()
def load(sceneRef=viz.MainScene):
    """Load bus scene 1: narration/ambient audio, the bus model, avatars,
    keyboard controls, the creepy seat-changing behavior, lighting, and
    tracking-data collection.

    :param sceneRef: viz scene to load content into (default: main scene)
    """
    global SCENE
    SCENE = sceneRef # TODO: Add scene params to all viz.add calls, etc in this module and submodules
    #Add audio
    global introAudio
    introAudio = viz.addAudio('Audio/Bus Scene/BusIntroduction.wav')
    global ambientAudio
    ambientAudio = viz.addAudio('Audio/Bus Scene/BusAmbientNoise.wav')
    #add bus
    BUS_OBJ_SCALE = .0365   # scale for the .obj export of the bus (unused alternative)
    BUS_OSGB_SCALE = 35     # scale for the .OSGB export of the bus
    curscale = BUS_OSGB_SCALE
    bus = vizfx.addChild('bus_obj/Bus_Final_Scene1.OSGB', scene=SCENE)
    bus.setScale([curscale]*3)
    #bus.emissive(1,1,1) # Makes it look like daytime
    # add homeless avatars
    global avatars
    avatars = avatarconfig.initAvatars(SCENE)
    # create reference to backpack and thief for fading. The .fade attribute = True means that the
    # object should fade if not in view.
    pack_objs = []
    pack = bus.getChild('pack')
    straps = bus.getChild('backpackstraps')
    avatars[1].fade = False
    pack.fade = True
    straps.fade = True
    pack_objs.append(avatars[1])
    pack_objs.append(pack)
    pack_objs.append(straps)
    viz.MainView.getHeadLight().disable()
    global controlAvOnKeyPress, controlViewOnKeyPress
    controlAvOnKeyPress = vizact.onkeydown('c', keyboardcontrol.control, avatars[0])
    # BUGFIX: bare 'MainView' raised NameError when binding the 'v' key;
    # it must be viz.MainView (as used a few lines above).
    controlViewOnKeyPress = vizact.onkeydown('v', keyboardcontrol.control, viz.MainView)
    if RIFT:
        creepySeatChanging = CreepySeatChanging.init(avatars[0], pack_objs, introAudio, vfov=VFOV_RIFT, hfov=HFOV_RIFT)
    else:
        creepySeatChanging = CreepySeatChanging.init(avatars[0], pack_objs, introAudio, vfov=VFOV_DEFAULT)
    CreepySeatChanging.disable()
    #BusLighting.addLighting(SCENE)
    BusLighting.addStreetlights(SCENE)
    global toggleBusLightsOnKeyPress
    toggleBusLightsOnKeyPress = vizact.onkeydown('z', BusLighting.toggleBusLighting)
    global dc
    dc = DataCollector(filePrefix='Data/CreepySeatTrackingData_Participant', fileSuffix='.csv')
    dc.addMainviewTracker()
    dc.startCollecting()
def chooseExpStateNLoadSound():
    """Load the intro and ending narration for the current condition,
    each both as a spatialized source above the participant and as a
    plain 2D clip."""
    global introAudioHMD, introAudioBasic, endingAudio, endingAudioBasic
    condition = CONDITION_CHOICES[currentCondition]
    introPath = 'audio/' + condition + '.wav'
    endingPath = 'audio/' + condition + 'Ending.wav'
    # Spatialized versions, preloaded but stopped, at the overhead node.
    introAudioHMD = aboveLocation.playsound(introPath, viz.STOP, volume = 0.75)
    endingAudio = aboveLocation.playsound(endingPath, viz.STOP, volume = 0.75)
    # Non-spatialized fallbacks.
    introAudioBasic = viz.addAudio(introPath)
    endingAudioBasic = viz.addAudio(endingPath)
def init(qHaveSimultaneous):
    """Start vizconnect, load the piazza models, grab raw trackers, and
    preload feedback sounds.

    :param qHaveSimultaneous: True -> Vive + PPT + Intersense rig;
        False -> Vive Lighthouse only.
    :return: namedtuple with fields visuals, sounds, trackerDataFun.
    """
    if qHaveSimultaneous:
        vizconnect.go('vizconnectSetups/vive_ppt_intersense.py')
    else:
        vizconnect.go('vizconnectSetups/viveLighthouse.py')
    piazza = viz.add('piazza.osgb')
    piazza2 = viz.add('piazza_animations.osgb')
    vive = vizconnect.getRawTracker('head_tracker')
    if qHaveSimultaneous:
        optHead = vizconnect.getRawTracker('optical_heading')
        PPT1 = vizconnect.getRawTracker('ppt_1')
        PPT2 = vizconnect.getRawTracker('ppt_2')
        PPT3 = vizconnect.getRawTracker('ppt_3')
        PPT4 = vizconnect.getRawTracker('ppt_4')
        inertiacube = vizconnect.getRawTracker('inertiacube')
    # check controller is on
    steamvr.getControllerList()[0].isButtonDown(2)
    def getTrackInfo():
        # Snapshot of current tracker poses, keyed by device, for logging.
        # NOTE(review): 'if 1:' makes the else (controller-only) branch
        # unreachable -- looks like a debugging toggle left in; confirm.
        if 1:
            if qHaveSimultaneous:
                return {
                    'vive': vive.getPosition() + vive.getEuler(),
                    'optical_heading': optHead.getPosition() + optHead.getEuler(),
                    'PPT1': PPT1.getPosition(),
                    'PPT2': PPT2.getPosition(),
                    'PPT3': PPT3.getPosition(),
                    'PPT4': PPT4.getPosition(),
                    'inertiacube': inertiacube.getEuler()
                }
            else:
                return {'vive': vive.getPosition() + vive.getEuler()}
        else:
            return {
                'viveController': steamvr.getControllerList()[0].getPosition() + steamvr.getControllerList()[0].getEuler()
            }
    trialStartSound = viz.addAudio('sounds/quack.wav', play=0, volume=2.)
    trialCompletedSound = viz.addAudio('sounds/pop.wav', play=0, volume=2.)
    alarmSound = viz.addAudio('alarm.wav', play=0, volume=2.)
    # make return values
    Sounds = namedtuple('Sounds', 'trialStart trialCompleted alarm')
    InitReturn = namedtuple('initReturn', 'visuals sounds trackerDataFun')
    return InitReturn(visuals=[piazza, piazza2], sounds=Sounds(trialStart=trialStartSound, trialCompleted=trialCompletedSound, alarm=alarmSound), trackerDataFun=getTrackInfo)
def chooseSound(self):
    """Play a random clip for the player's ship: indices 0-4 are X-Wing
    sounds, 5-11 are TIE sounds."""
    # Both draws happen unconditionally, matching the original RNG usage.
    xwingIdx = random.randint(0, 4)
    tieIdx = random.randint(5, 11)
    idx = xwingIdx if self.model.id == self.xWing.id else tieIdx
    clip = viz.addAudio(self.sounds[idx])
    clip.volume(.5)
    clip.play()
def onKeyDown(self,key):
    """Keyboard demo driver: '1' stop/idle, '2' pose, '3' intro sequence
    (music, title text, video quad), '4' camera move + animation."""
    if key == '1':
        self.avatar.stopAction(1)
        self.avatar.state(0)
    elif key == "2":
        self.avatar.state(1)
    elif key == "3":
        # Background music, looped.
        mySound = viz.addAudio( 'mazin.mp3' )
        mySound.loop( viz.ON )
        mySound.play()
        #self.avatar.execute(1)
        #opening video test
        #title text
        self.textScreen = viz.addText('Mazinger Animation Demo',viz.SCREEN)
        #self.textScreen.alignment(viz.ALIGN_CENTER_BOTTOM)
        self.textScreen.setScale([1,1,1])
        #self.textScreen.setPosition([0,10,0])
        self.starttimer(2,1,22)
        # Quad that displays the (muted) opening video.
        self.object = viz.addTexQuad(size=2)
        video = viz.addVideo('open.avi')
        video.setRate(1)
        video.volume(0)
        video.play()
        #fixing video position
        self.object.texture(video)
        self.object.setAxisAngle( [0, 1, 0 , 180] )
        self.object.setPosition([0,10,18])
        #self.avatar.setAnimationSpeed(1, .5) #run at half speed
    elif key == "4":
        viz.MainView.setPosition([0,10,20])
        viz.lookAt([0,10,0])
        self.starttimer(1,1,8)
        self.avatar.execute(2)
def init(flagPile): global lhandtar, rhandtar, placeFlagSound, pickupFlagSound # viz.playSound(globals_oa.AUDIO_DIRECTORY + 'bell.wav', viz.SOUND_PRELOAD) placeFlagSound = viz.addAudio(globals_oa.AUDIO_DIRECTORY + "flag_in_ground.mp3") placeFlagSound.play() placeFlagSound.stop() pickupFlagSound = viz.addAudio(globals_oa.AUDIO_DIRECTORY + "flag_in_hand.mp3") pickupFlagSound.play() pickupFlagSound.stop() # adding a manager for touching the snails global snailTouchManager snailTouchManager = vizproximity.Manager() # snailTouchManager.setDebug(viz.ON) vizact.onkeydown("k", snailTouchManager.setDebug, viz.TOGGLE) # adding sensor for the flag pile flagPileSensor = vizproximity.Sensor(vizproximity.Sphere(0.3, center=(-0.8, -1, -1.8)), source=flagPile) flagPileManager = vizproximity.Manager() flagPileManager.addSensor(flagPileSensor) vizact.onkeydown(",", flagPileManager.setDebug, viz.TOGGLE) flagPileManager.onEnter(flagPileSensor, EnterProximity) global rhandtar, lhandtar if globals_oa.rhModel is not None: rhsphere = vizshape.addSphere(radius=0.06) rhsphere.setParent(globals_oa.rhModel) rhsphere.setPosition([0, 0, 0.55]) rhsphere.disable(viz.RENDERING) rhandtar = vizproximity.Target(rhsphere) # globals_oa.rhModel) snailTouchManager.addTarget(rhandtar) flagPileManager.addTarget(rhandtar) if globals_oa.lhModel is not None: lhsphere = vizshape.addSphere(radius=0.06) lhsphere.setParent(globals_oa.lhModel) lhsphere.setPosition([0, 0, 0.55]) lhsphere.disable(viz.RENDERING) lhandtar = vizproximity.Target(lhsphere) # globals_oa.lhModel) snailTouchManager.addTarget(lhandtar) flagPileManager.addTarget(lhandtar) # adding the sensors around the snails for snailAndFlagItem in globals_oa.snailAndFlagItemsInZone1: snailSensor = vizproximity.addBoundingSphereSensor(snailAndFlagItem.snailItemFromZoneModel, scale=(3.0)) snailTouchManager.addSensor(snailSensor) snailTouchManager.onEnter(None, ShowFlagAtSnail)
def __init__(self, subject, start_from_trial=0): """ Setup data path, room, ball, baseball glove and trial list. Initialize events and actions. :param subject: subject nr :param start_from_trial: number of first trial (used when experiment crashed) """ # call superclass constructor viz.EventClass.__init__(self) # create data path self.data_dir = 'data/{}/'.format(subject) # read experiment info (config dict) with open(self.data_dir + 'exp_info.txt') as info_file: config = json.load(info_file) # setup hardware hardware.setup(config['hardware']) # initialize visual environment print '\nSetting up visual environment...' self.room = Room(config) self.config = config['experiment'] # add ball ball_size = self.config['ball_size'] ball_initial_pos = [0, ball_size, 36.22] self.ball = Ball(room=self.room, size=ball_size, position=ball_initial_pos, color=[1, 1, 1]) self.ball.setAlpha(0) self.glove = None self.catch_sound = None # add glove if self.config['glove']: self.glove = BaseballGlove(room=self.room, size=self.config['glove_radius'], marker_num=1, hand=config['experiment']['hand']) self.catch_sound = viz.addAudio('res/sounds/applause.wav') # initialize trial list self.trials = pd.read_csv(self.data_dir + 'trials.csv').to_dict('records') self.trial_num = start_from_trial - 1 self.current_trial = None self.current_block = None self.message_panel = None if hardware.hmd: # HMD message panel for showing messages between blocks and at the end of the experiment self.message_panel = hardware.hmd.addMessagePanel('You did it!') self.message_panel.visible(False) # key-press events (for starting the experiment) self.callback(viz.KEYDOWN_EVENT, self.on_key_down) # handle collisions (register when glove touches ball) vizact.onupdate(0, self.handle_collisions) # play sound if participant leaves allowed area self.allowed_area_action = vizact.ontimer(0.7, self.check_allowed_area)
def onClick(self, click):
    """Ship-selection handler: keep the picked model, remove the other,
    set faction win/lose music, start the ambient loop, then build the
    trench and start the game timers."""
    if self.picking:
        choice = viz.pick(info=True)
        if choice.valid:
            if choice.object.id in [self.xWing.id, self.tie.id]:
                if choice.object.id == self.xWing.id:
                    print("xwing")
                    self.model = self.xWing
                    self.tie.remove()
                    self.winSong = 'audio/good-win.wav'
                    self.loseSong = 'audio/good-fail.wav'
                    self.ambient = viz.addAudio('audio/xWing-ambient.wav')
                    self.ambient.loop(viz.ON)
                    self.ambient.play()
                    self.picking = False
                elif choice.object.id == self.tie.id:
                    print("tie")
                    self.model = self.tie
                    # TIE model faces the wrong way; flip 180 deg about Y.
                    self.rotmatrix.postAxisAngle(0, 1, 0, 180)
                    self.xWing.remove()
                    self.winSong = 'audio/bad-win.wav'
                    self.loseSong = 'audio/bad-lose.wav'
                    self.ambient = viz.addAudio('audio/tie-ambient.wav')
                    self.ambient.loop(viz.ON)
                    self.ambient.play()
                    self.picking = False
                # Common setup once either ship was chosen.
                self.setOrientation()
                self.setView()
                self.trench = self.addTrench()
                self.trench.collideMesh()
                self.model.collideMesh()
                self.model.enable(viz.COLLIDE_NOTIFY)
                self.model.enable(viz.LIGHTING)
                self.starttimer(1, viz.FASTEST_EXPIRATION, viz.FOREVER)
                self.starttimer(3, 2, viz.FOREVER)
def __init__(self, index, x, y, z):
    """A numbered waypoint: small white sphere with a proximity sensor,
    a completion signal, and its matching audio clip."""
    self.index = index
    self.center = x, y, z
    # Visible marker sphere at the waypoint position.
    marker = vizshape.addSphere(0.1)
    marker.setPosition(self.center)
    marker.color(viz.WHITE)
    self.sphere = marker
    self.sensor = vizproximity.addBoundingSphereSensor(marker, scale=1)
    self.signal = viztask.Signal()
    # Clip is named after the zero-padded index, e.g. "03.wav".
    self.sound = viz.addAudio('%02d.wav' % index)
def setAudio(self, audioFile):
    """Replace this object's audio clip with *audioFile* (None clears it).

    With PPT active the clip is spatialized from self.audio_node;
    otherwise it is a plain 2D clip.
    """
    # Always drop the previous clip first.
    if self.audio:
        self.audio.remove()
    if audioFile is None:
        self.audio = None
        return
    if self.PPT1:
        # Spatialized playback, preloaded but stopped.
        self.audio = self.audio_node.playsound(audioFile, viz.STOP, volume=1)
    else:
        self.audio = viz.addAudio(audioFile)
def prepareForObjectSelling( obj ): global SCENE, APARTMENT, OBJECTS, RIFT audio = obj.narrationFile[0] sound = viz.addAudio(audio) duration = sound.getDuration() yield viztask.waitTime(1) sound.play() print "now playing audio for prepare for selling object" yield viztask.waitTime(duration) ObjectSelling.init(APARTMENT, OBJECTS, SCENE, RIFT) ObjectSelling.enable()
def loadAudioFile(filePath):
    """Load *filePath* and return it wrapped in an AudioWrapper.

    NOTE(review): an earlier, commented-out variant spatialized playback
    through globals_oa.user in the VHIL render environment; only the
    plain viz.addAudio path is active now.
    """
    return AudioWrapper.AudioWrapper(viz.addAudio(filePath))
def playNarrationFile( obj=None ): #global selector #selector.disable() selector.removeOnDeselectCallback(RADIO) selector.removeOnSelectCallback(RADIO) selector.disable() print 'narrating' audio = obj.narrationFile[0] sound = viz.addAudio(audio) duration = sound.getDuration() sound.play() print "playing " + str(audio) vizact.ontimer2(duration+DELAY_UNTIL_NARRATION, 0, checkForNextEvent, obj) del obj.narrationFile[0]
def init(avatar, pack_objs, intro, **kwargs):
    """Prepare the creepy seat-changing behavior: build the queue of
    aisle positions the avatar teleports through, load its narration,
    and start the movement task."""
    global isBusScene, moveSeats, seatQueue
    isBusScene = False
    DetectWhetherInView.init(**kwargs)
    # TODO: Abstract to handle arbitrary orientation too (for front right seats)
    placements = (Seats.getseatplacement(sid)[0] for sid in avatar.teleSeatIDs)
    # Shift each seat half a meter on z so the node sits in the bus aisle.
    seatQueue = deque([p[0], p[1], p[2]-0.5] for p in placements)
    global introAudio1, introAudio2
    introAudio1 = intro
    introAudio2 = viz.addAudio('Audio/CreepyTaskIntroduction.wav')
    FadeObject.init(pack_objs, 25, 0.01, **kwargs)
    viztask.schedule( creepilyMoveCloser(avatar) )
def logo_start():
    """viztask coroutine: play the logo sound, fade the 3D text pieces
    out while fading the quad in, then fade the quad back out."""
    sound = viz.addAudio('sound/Omega-FlyHyperspace.wav')
    sound.play()
    fadeout = vizact.fadeTo(0, time=2)
    fadein = vizact.fadeTo(1, time=4)
    # Let the intro sound run before starting the cross-fade.
    yield viztask.waitTime(2)
    text3D_G.addAction(fadeout)
    text3D_L.addAction(fadeout)
    text3D_GL.addAction(fadeout)
    text3D_nagnao.addAction(fadeout)
    quad.addAction(fadein)
    yield viztask.waitTime(0.1)
    yield quad.addAction(fadeout)
def myTask():
    """viztask coroutine: button sound, fade out the title text and grid,
    then chain into the opening logo and the promotion sequence."""
    sound = viz.addAudio('sound/samplerButton.wav')
    sound.play()
    fadeout = vizact.fadeTo(0, time=2)
    yield text3D.addAction(fadeout)
    yield text3D.disable(True)
    yield grid.addAction(fadeout)
    yield viztask.waitTime(3)
    # Show the GLNAGANO logo here.
    import opning
    yield opning.Logo()
    yield viztask.waitTime(10)
    import promothion
    # NOTE(review): bare name -- the import above is the actual side effect.
    promothion
def __init__(self, scene=viz.MainScene, crosshairFeedback = False, audioFeedback = False):
    """Gaze selector rendered as a center-screen crosshair.

    :param scene: scene the crosshair aimers are added to
    :param crosshairFeedback: also create a green 'selected' crosshair
    :param audioFeedback: play a sound when a node is selected
    """
    self.scene = scene
    # Default aimer, always present.
    self.crosshair, self.crosshairLink = AimCenterScreen.addAimer(CROSSHAIR_DEFAULT_FILENAME, .1, scene=self.scene)
    self.crosshairFeedback = crosshairFeedback
    if crosshairFeedback:
        # self.crosshairWithCallback, self.crosshairWithCallbackLink = AimCenterScreen.addAimer(CROSSHAIR_SELECTED_FILENAME, .2, scene=self.scene)
        # Larger green aimer shown while something is selected.
        self.crosshairWithCallback, self.crosshairWithCallbackLink = AimCenterScreen.addAimer(CROSSHAIR_SELECTED_FILENAME, .2, scene=self.scene, incomingColor = viz.GREEN)
    self.audioFeedback = audioFeedback
    if audioFeedback:
        self.selectedAudio = viz.addAudio(AUDIO_SELECTED_FILENAME)
    # Selection state.
    self.enabled = None
    self.selected = None
    self.deselected = None # Stores the most recently deselected node
    self.callOnSelect = {} # Map of callbacks for when individual nodes are selected
    self.callOnDeselect = {} # Map of callbacks for when individual nodes are deselected
    self.disable()  # start inactive; enabled by the scene when needed
def add_and_play_background_noise(self, volume):
    """Start looping white noise for noise cancellation.

    Args:
        volume: playback volume of the background noise.

    Returns:
        the looping audio object (also stored as self.noise).
    """
    noise = viz.addAudio('white-noise_192k.wav')
    noise.loop(viz.ON)
    noise.volume(volume)
    noise.play()
    self.noise = noise
    return noise
def __init__(self):
    ################################################################
    ## Register sounds. It makes sense to do it once per experiment.
    # (attribute name, file under /Resources/) pairs.
    clips = [
        ('bounce', 'bounce.wav'),
        ('buzzer', 'BUZZER.wav'),
        ('bubblePop', 'bubblePop3.wav'),
        ('cowbell', 'cowbell.wav'),
        ('highdrip', 'highdrip.wav'),
        ('gong', 'gong.wav'),
    ]
    for attr, wav in clips:
        setattr(self, attr, viz.addAudio('/Resources/' + wav))
def initAvatars(scene=viz.MainScene):
    """Create, place, and configure the bus avatars described by
    AVATAR_DATA; returns them in AVATAR_DATA order."""
    avatars = []
    for d in AVATAR_DATA:
        avatar = vizfx.addChild(d.file, scene=scene)
        # Child avatars get a different seat placement offset.
        isChild = True if d.name == 'son/father' else False
        pos, ori = Seats.getseatplacement(d.seat, 1, isChild)
        avatar.setPosition(pos)
        avatar.setEuler(ori)
        avatar.state(d.state)
        avatar.stateNum = d.state # TODO: Extend .state method to do this?
        if d.name == 'son/father':
            # Shares the story clip of the avatar built earlier at index 2;
            # assumes AVATAR_DATA lists that avatar before this one -- TODO confirm.
            avatar.story = avatars[2].story
        else:
            avatar.story = viz.addAudio(d.story)
        avatar.name = d.name
        avatar.done = d.done
        avatar.setScale([d.scale,d.scale,d.scale])
        avatars.append(avatar)
    return avatars
def __init__(self, scene=viz.MainScene, crosshairFeedback = False, audioFeedback = False):
    """Gaze selector with optional crosshair and audio feedback.

    In the VHIL render environment the selection sound is spatialized
    via vizsonic/playsound; elsewhere a plain viz audio clip is used.
    """
    self.scene = scene
    # Default aimer, always present.
    self.crosshair, self.crosshairLink = aimcenterscreen.addAimer(CROSSHAIR_DEFAULT_FILENAME, .1, scene=self.scene)
    self.crosshairFeedback = crosshairFeedback
    if crosshairFeedback:
        # self.crosshairWithCallback, self.crosshairWithCallbackLink = aimcenterscreen.addAimer(CROSSHAIR_SELECTED_FILENAME, .2, scene=self.scene)
        # Larger green aimer shown while something is selected.
        self.crosshairWithCallback, self.crosshairWithCallbackLink = aimcenterscreen.addAimer(CROSSHAIR_SELECTED_FILENAME, .2, scene=self.scene, incomingColor = viz.GREEN)
    self.audioFeedback = audioFeedback
    if audioFeedback:
        if globals_oa.RENDER_ENV == globals_oa.RENDER_ENV_CHOICES['VHIL']:
            # Deferred import: vizsonic only available/needed at VHIL.
            import vizsonic
            self.selectedAudio = globals_oa.user.playsound(AUDIO_SELECTED_FILENAME, viz.PLAY, volume=1.0)
        else:
            self.selectedAudio = viz.addAudio(AUDIO_SELECTED_FILENAME)
    # Selection state.
    self.enabled = None
    self.selected = None
    self.deselected = None # Stores the most recently deselected node
    self.callOnSelect = {} # Map of callbacks for when individual nodes are selected
    self.callOnDeselect = {} # Map of callbacks for when individual nodes are deselected
    self.disable()
def load(sceneRef=viz.MainScene):
    """Load bus scene 2: bus model, ambient audio, story avatars, the
    selectable exit sign, avatar-stories narration, and lighting.

    :param sceneRef: viz scene to load content into (default: main scene)
    """
    global SCENE
    SCENE = sceneRef # TODO: Add scene params to all viz.add calls, etc in this module and submodules
    # BUGFIX: a leftover second assignment 'SCENE = viz.MainScene' here
    # clobbered sceneRef, silently ignoring the parameter; removed.
    # add bus
    BUS_OBJ_SCALE = 0.0365  # scale for the .obj export (unused alternative)
    BUS_OSGB_SCALE = 35     # scale for the .osgb export
    curscale = BUS_OSGB_SCALE
    bus = vizfx.addChild("bus_obj/Bus_Final_Scene2.osgb", scene=SCENE)
    bus.setScale([curscale] * 3)
    # bus.emissive(1,1,1) # Makes it look like daytime
    # add bus ambient noise
    global ambientAudio
    ambientAudio = viz.addAudio("Audio/Bus Scene/BusAmbientNoise.wav")
    # add homeless avatars
    global avatars
    avatars = avatarconfigStories.initAvatars(SCENE)
    # nodes to be removed at the end of the bus scene
    global toRemove
    toRemove = getRemoveNodes(bus)
    # make exit sign a node for selection to allow for ending the scene.
    exit = bus.getChild("Plane007")
    exit.name = "exit"
    exit.done = False
    avatars.append(exit)
    # init Avatar Stories and narration
    AvatarStories.init(avatars, toRemove, ambientAudio, SCENE)
    # BusLighting.addLighting(SCENE)
    BusLighting.addStreetlights(SCENE)
    global toggleBusLightsOnKeyPress
    toggleBusLightsOnKeyPress = vizact.onkeydown("z", BusLighting.toggleBusLighting)
#viz.link(hmd.getSensor(), viz.MainView) viz.setMultiSample(4) viz.fov(60) viz.go() viz.collision(viz.ON) viz.gravity(10) viz.phys.enable() #Add message panel and audio message_panel = vizdlg.MessagePanel('') message_panel.setPanelScale(2.0) message_panel.setScreenAlignment(viz.ALIGN_CENTER_TOP) message_panel.visible(True) message_panel.setText('Welcome to VR paddle boarding experience!') audio = viz.addAudio('thatGoodShip.mp3') audio.play() class PlayerPrefs: def __init__(self, is_right_handed, height): self.is_right_handed = is_right_handed self.height = height # PUBLIC def paddle_scale(self): # Scale paddle to 3/4 height s = 2 * self.height return [s, s, s]
# HUD labels for the apartment scene: days remaining and debt remaining.
LABEL_DATA = [
    struct ( text = 'Days Left to Pay Debt', value = 12 ),
    struct ( text = 'Amount Left to Pay', value = 700.0 ),
]
SELL_AUDIO = 'Audio/Apartment Scene/ChaChingSound.wav' # Audio to play upon selling an object
PLEASE_SELL_AUDIO = 'Audio/Apartment Scene/ItemSelling_Directions_Pt2.wav' # Audio to ask user to sell next object
FINAL_EVICT_NARRATIVE = 'Audio/Apartment Scene/LandlordScene2.wav'
# NOTE(review): 'global' at module level is a no-op.
global sellNarrative, chaChingSound, finalEvictNarrative
chaChingSound = viz.addAudio(SELL_AUDIO)
sellNarrative = viz.addAudio(PLEASE_SELL_AUDIO)
finalEvictNarrative = viz.addAudio(FINAL_EVICT_NARRATIVE)

def sellSelectedObject(objectToSell=None):
    """Sell the currently selected object: if it is sellable, fade it
    out; otherwise ignore this click and wait for the next one."""
    if SELL_ON_CLICK:
        global SELLABLES
        if selector.select() not in SELLABLES:
            # Not a sellable item: re-arm selection for the next click.
            selector.disable()
            waitForNextClick()
        else:
            objectToSell = selector.select()
            global dc
            print 'selling object...'
            objectToSell.addAction(OBJ_FADE_ACTION)
import viz
import vizshape
import vizcam
import math
import vizact
from Environment2 import *

# set size (in pixels) and title of application window
viz.window.setSize(640, 480)
viz.window.setName("Taxi Driver!")
# get graphics window
window = viz.MainWindow
# Background music.
sound = viz.addAudio('MoonlightSonata.wav')
sound.volume(.5)
#sound.setTime(1)
#sound.setRate(0.7)
sound.play()
# get mini map: an overhead view rendered in a corner sub-window
miniMap = viz.addWindow()
miniMap.setSize([.3, .4])
miniMap.setPosition([0, 1])
miniMap.fov(70)
miniMapView = viz.addView()
miniMap.setView(miniMapView)
miniMapView.setPosition([250, 450, 275])  # high above the scene
miniMapView.setEuler([90, 90, 0])  # looking straight down
""" import viz import vizact import vizproximity import viztask import vizinfo viz.setMultiSample(4) viz.fov(60) viz.go() #Add info panel to display messages to participant instructions = vizinfo.InfoPanel(icon=False,key=None) #Add ambient sound piazzaSound = viz.addAudio('piazza.mp3') piazzaSound.play() piazzaSound.loop() piazza = viz.add('piazza.osgb') #Move the viewpoint to the starting location viz.MainView.move([10.5,0,20.5]) viz.MainView.setEuler([-90,0,0]) #Add male and female avatars in conversation male = viz.addAvatar('vcc_male.cfg',pos=[-2.6,0,10.4],euler=[-40,0,0]) female = viz.addAvatar('vcc_female.cfg',pos=[-3.4,0,11.2],euler=[140,0,0]) male.state(14) female.state(14)
viz.go(viz.FULLSCREEN) # Simulate head tracker using keyboard/mouse navigator head_tracker = vizcam.addWalkNavigate() head_tracker.setPosition([0,1.5,0]) # Add pit model model = viz.add('pit.osgb') model.hint(viz.OPTIMIZE_INTERSECT_HINT) # Get handle to platform object platform = model.getChild('platform') platform.raised = False platform.positions = [[0,0,0],[0,7,0]] platform.audio_start = viz.addAudio('sounds/platform_start.wav') platform.audio_running = viz.addAudio('sounds/platform_running.wav',loop=True) platform.audio_stop = viz.addAudio('sounds/platform_stop.wav') def TogglePlatform(): """Toggle raising/lower of platform""" platform.raised = not platform.raised pos = platform.positions[platform.raised] platform.audio_start.stop() platform.audio_start.play() platform.audio_running.play() platform.runAction(vizact.moveTo(pos,speed=2.0)) platform.addAction(vizact.call(platform.audio_stop.play)) platform.addAction(vizact.call(platform.audio_running.pause))
def __init__(self, trial_num, trial_info, ball, glove, data_dir, parent):
    """Setup trial info and actions.

    :param trial_num: # of trial in Experiment
    :param trial_info: dict containing trial info
    :param ball: the ball used in the Experiment
    :param glove: a BaseballGlove object
    :param data_dir: directory to save data log file
    :param parent: parent Experiment
    """
    self.ball = ball
    self.glove = glove
    self.parent = parent
    self.data_filename = data_dir + "/trial" + str(trial_num) + "data.txt"
    # data file is only initialized when ball is launched
    self.data_file = None
    # set up logging
    self.log_action = vizact.onupdate(0, self.log_data)
    self.log_action.setEnabled(False)
    self.timer = 0
    # get trial info
    self.trial_info = trial_info
    # NOTE(review): eval() on CSV fields -- trusted input only; consider
    # ast.literal_eval for the list-valued columns.
    self.start_pos = eval(trial_info['start_pos'])
    self.is_fly_ball = trial_info['fly_ball']
    speed = trial_info['speed']
    self.ball_dir = eval(trial_info['ball_dir'])
    self.ball_pos = eval(trial_info['ball_pos'])
    if trial_info['mirror']:
        # change position
        # change direction
        self.ball_dir[0] = -self.ball_dir[0]
        self.ball_pos[0] = -self.ball_pos[0]
    # Fly balls get real gravity; ground balls get none.
    if self.is_fly_ball:
        self.parent.room.physenv.world.setGravity([0, -9.8, 0])
    else:
        self.parent.room.physenv.world.setGravity([0, 0, 0])
    # reset ball
    self.ball.setPosition(self.ball_pos)
    self.ball.setVelocity([0, 0, 0])
    self.ball.setAlpha(0)
    self.ball.setContrastMode(trial_info['contrast_mode'])
    # print info for next trial
    info_texts = ['\nTrial ' + str(trial_num),
                  'Fly ball: ' + str(self.is_fly_ball),
                  'Speed ' + str(speed),
                  'Contrast: ' + str(trial_info['contrast_mode']),
                  'Ball pos: ' + str(self.ball_pos),
                  'Ball direction: ' + str(self.ball_dir),
                  'Mirrored: ' + str(trial_info['mirror'])]
    network.send_message('')
    for text in info_texts:
        print text
        network.send_message(text)
    pole_pos = list(self.ball_pos)
    pole_pos[1] = 0  # drop the pole to ground level
    # set up poles (for start position and position to be faced)
    self.start_pole = viz.addChild('pole.wrl')
    self.start_pole.setPosition(self.start_pos)
    # TODO: make margin flexible?
    # 0.24 is pole size
    # 0.5 / 0.24 ~= 2 -> scale pole up to 2
    self.start_pole.setScale([2, 0.35, 2])
    self.start_pole.color(viz.BLUE)
    self.start_pole.alpha(0.5)
    self.ball_pole = viz.addChild('pole.wrl')
    self.ball_pole.setPosition(pole_pos)
    self.launch_pole = self.ball_pole.copy()
    self.launch_pole.setPosition(pole_pos)
    self.launch_pole.color(viz.RED)
    self.launch_pole.setScale([1.1, 0, 1.1])
    self.start_timer = 0
    self.countdown_sound = viz.addAudio('res/sounds/beeps.wav')
    # set up pre-launch phase
    self.await_action = vizact.onupdate(0, self.await_launch)
    self.await_end_task = None
    self.has_started = False
    self.timer = 0
manager.onEnter(sensorPlane5, EnterProximity) #OnEXIT manager.onExit(sensorGem31, exitGem) manager.onExit(sensorGem51, exitGem) manager.onExit(sensorGem52, exitGem) manager.onExit(sensorGem53, exitGem) manager.onExit(sensorGem12, removeSubWindow) manager.onExit(sensorGem13, removeSubWindow) manager.setDebug(viz.OFF) vizact.onkeydown('d', manager.setDebug, viz.TOGGLE) MainMenu() chime_sound = viz.addAudio(sound['chime']) elevator_sound = viz.addAudio(sound['elevator']) explosion_sound = viz.addAudio(sound['deslave']) hammer_sound = viz.addAudio(sound['hammer']) dirt_sound = viz.addAudio(sound['dirt']) cave_sound = viz.addAudio(sound['cave']) viz.MainView.eyeheight(4.75) #viz.MainView.setPosition(-65,5,8) toogleCollide(True) joy = vizjoy.add() print "MenuPrincipal" vizact.ontimer(0, UpdateJoystick)
def loadAudioAssets():
    """Load every audio asset for the experience into globals_oa.

    Most clips go through loadAudioFile (AudioWrapper); a few are plain
    viz.addAudio clips. Several are play()+stop()'d once to force
    pre-buffering so later playback starts without a hitch.
    """
    globals_oa.loadAudioAssetsCalled = True
    globals_oa.AUDIO_SUCCESS = loadAudioFile('sounds/sfx/sfx_success.wav')
    globals_oa.AUDIO_SUCCESS.volume(0.5)
    globals_oa.AUDIO_PICKUP = loadAudioFile('sounds/sfx/sfx_pickup.wav')
    globals_oa.AUDIO_PICKUP.volume(0.25)
    globals_oa.AUDIO_DROP = loadAudioFile('sounds/sfx/sfx_drop.wav')
    globals_oa.AUDIO_DROP.volume(0.25)
    globals_oa.AUDIO_BUBBLES = loadAudioFile('sounds/sfx/sfx_bubbles.wav')
    #### TCR 3.0 (TCR for DC) Audio
    # globals_oa.GLOBE_SCENE_AUDIO = loadAudioFile('sounds/tcr_dc/globeRoomSoundtrack.mp3')
    globals_oa.GLOBE_FAIL_AUDIO = loadAudioFile('sounds/tcr_dc/TCR_opening_touch_repeat_hv.mp3')
    # globals_oa.CITY_SCENE_AMBIENT = loadAudioFile('sounds/tcr_dc/citySceneAmbient.mp3')
    # globals_oa.CAR_SCENE1 = loadAudioFile('sounds/tcr_dc/CarScene1.wav')
    globals_oa.CAR_FAIL_AUDIO = loadAudioFile('sounds/tcr_dc/TCR_city_scene_touch_repeat_hv.mp3')
    # globals_oa.CAR_SCENE2 = loadAudioFile('sounds/tcr_dc/CarScene2.wav')
    globals_oa.GLOBE_SCENE_AUDIO1 = loadAudioFile('sounds/tcr_dc/TCR_opening_v3_score_hv.mp3')
    globals_oa.GLOBE_SCENE_AUDIO1.volume(0.4)
    globals_oa.GLOBE_SCENE_AUDIO2 = loadAudioFile('sounds/tcr_dc/TCR_opening_v3_narration_hv.mp3')
    globals_oa.CITY_SCENE_AMBIENT = loadAudioFile('sounds/tcr_dc/citySceneAmbient.mp3')
    globals_oa.CAR_SCENE1 = loadAudioFile('sounds/tcr_dc/TCR_city_scene_p1_v3_hv.mp3')
    globals_oa.CAR_SCENE2 = loadAudioFile('sounds/tcr_dc/TCR_city_scene_car_look_hv.mp3')
    globals_oa.CAR_SCENE3 = loadAudioFile('sounds/tcr_dc/TCR_city_scene_touch_first_hv.mp3')
    globals_oa.CAR_SCENE4 = loadAudioFile('sounds/tcr_dc/TCR_city_scene_p3_v3_hv.mp3')
    globals_oa.CAR_SCENE5 = loadAudioFile('sounds/tcr_dc/TCR_city_scene_p4_v3_hv.mp3')
    globals_oa.CAR_START = loadAudioFile('sounds/tcr_dc/carStartSoundCut.mp3')
    globals_oa.BOAT_SCENE_AMBIENT = loadAudioFile('sounds/tcr_dc/TCR_boat_ambient.mp3')
    globals_oa.BOAT_SCENE_AMBIENT.volume(0.1)
    globals_oa.BOAT_SCENE1 = loadAudioFile('sounds/tcr_dc/TCR_boat_p1_v4_hv.mp3')
    # BUGFIX: BOAT_SCENE2 was assigned twice -- a loadAudioFile() result
    # that was immediately overwritten by the viz.addAudio below. The dead
    # first assignment is removed; the effective value is unchanged.
    globals_oa.BOAT_SCENE2 = viz.addAudio('sounds/tcr_dc/TCR_boat_p2_v4_hv.mp3')
    globals_oa.BOAT_SCENE3 = loadAudioFile('sounds/tcr_dc/TCR_boat_p3_v3_hv.mp3')
    globals_oa.BOAT_SCENE4 = loadAudioFile('sounds/tcr_dc/TCR_boat_p4_v4_hv.mp3')
    globals_oa.BOAT_SCENE_TOUCH = loadAudioFile('sounds/tcr_dc/TCR_boat_please_touch_prompt.mp3')
    globals_oa.SPHERICAL_VIDEO_SCORE = loadAudioFile('sounds/tcr_dc/TCR_360_music_short_hv.mp3')
    globals_oa.SPHERICAL_VIDEO_SCORE.volume(0.25)
    globals_oa.SPHERICAL_VIDEO_NARRATION = loadAudioFile('sounds/tcr_dc/TCR_360_narration_short_hv.mp3')
    globals_oa.SPHERICAL_VIDEO_NARRATION.volume(1.0)
    globals_oa.TRANSITION_NARRATION = viz.addAudio('sounds/tcr_dc/TCR_transition_to_CG_v3_short_hv.mp3')
    globals_oa.TRANSITION_NARRATION.volume(1.0)
    globals_oa.UNDERWATER_AMBIENT = viz.addAudio('sounds/tcr_dc/CG_Ocean_ambient_hv.mp3')
    globals_oa.UNDERWATER_AMBIENT.volume(0.3)
    globals_oa.TRANSITION_TO_CG = loadAudioFile('sounds/tcr_dc/TCR_transition_to_CG_v3_short_hv.mp3')
    globals_oa.TRANSITION_TO_CG.volume(1.0)
    globals_oa.CG_SCENE1_1 = loadAudioFile('sounds/tcr_dc/TCR_CG1_p1_v4_hv.mp3')
    globals_oa.CG_SCENE1_2 = loadAudioFile('sounds/tcr_dc/TCR_CG1_p2_v5_hv.mp3')
    globals_oa.CG_SCENE2_1 = loadAudioFile('sounds/tcr_dc/TCR_CG2_p1_v2_hv.mp3')
    globals_oa.CG_SCENE2_2 = loadAudioFile('sounds/tcr_dc/TCR_CG2_p2_v3_hv.mp3')
    globals_oa.CG_FAIL = loadAudioFile('sounds/tcr_dc/TCR_CG1_reach_down_repeat.mp3')
    globals_oa.finalTransitionSound = viz.addAudio('sounds/tcr_dc/TCR_transition_v1.mp3')
    # play()+stop() forces pre-buffering for instant playback later.
    globals_oa.finalTransitionSound.play()
    globals_oa.finalTransitionSound.stop()
    globals_oa.FINAL_SCENE = viz.addAudio('sounds/tcr_dc/TCR_Final_Scene_all_v4_hv.mp3')
    globals_oa.FINAL_SCENE.play()
    globals_oa.FINAL_SCENE.stop()
global global_sound #Here we add our elephant photo to the current scene we're in this_texture = viz.addTexture(this_node.image) this_sphere.texture(this_texture) #Here we lock down the textbox to our scene, first we initilize if this_node.info_image is not None: print("Info Image:" + this_node.info_image) this_texture = viz.addTexture(this_node.info_image) global_plane.texture(this_texture) if global_sound is not None: global_sound.stop() global_sound = viz.addAudio(this_node.audio) global_sound.loop(viz.ON) if this_node.audio == "sounds/bushfield.mp3": global_sound.volume(.3) else: global_sound.volume(.5) global_sound.play() # def globalAudio(): # #Creates global variable in order to constantly be looping the # #audio between scenes # sound = viz.addAudio('sounds/natureSounds.mp3') # sound.loop(viz.ON)
import viz

# Sound-effect clips used by the experiment.
birds = viz.addAudio('birds.wav')
boing = viz.addAudio('BOING!.WAV')
bounce = viz.addAudio('bounce.wav')
buzzer = viz.addAudio('BUZZER.WAV')
# NOTE(review): the resource path is registered only here, after the first
# four clips are loaded -- those loads rely on being findable without it;
# confirm whether addPath should come first.
viz.res.addPath('Resources')
bubble = viz.addAudio('bubblePop3.wav')
cowbell = viz.addAudio('cowbell.wav')
drip = viz.addAudio('highdrip.wav')
gong = viz.addAudio('gong.wav')
import os import time import viz for f in os.listdir('C:/Program Files/WorldViz/Vizard4/Resources/'): if f.lower().endswith('.wav'): print f a = viz.addAudio(f) a.play() time.sleep(3) a.stop()
""" The disappearing cross task reimplemented from Blascovich & Katkin 1993. [email protected] """ from fmri_trigger import TRIGGER_EVENT, RIGHT_BUTTON_EVENT, LEFT_BUTTON_EVENT import viz, vizact, viztask, vizinfo # Images cue = viz.addTexture("images/cue.png") hbar = viz.addTexture("images/hbar.png") vbar = viz.addTexture("images/vbar.png") cross = viz.add("images/cross.png") # Sounds correct_sound = viz.addAudio("images/beep-8.wav") incorrect_sound = viz.addAudio("images/beep-3.wav") # Text for feedback block_text = viz.addText("",parent=viz.SCREEN) block_text.setPosition(0.5,0.8) block_text.alignment(viz.ALIGN_CENTER_CENTER) block_text.font("times.ttf") MESSAGE_TIME = 1 # ---------- Configure so responses are mapped to cross components -- HBAR_RESPONSE = viztask.waitEvent(LEFT_BUTTON_EVENT,all=True) VBAR_RESPONSE = viztask.waitEvent(RIGHT_BUTTON_EVENT,all=True) # ------------------------------------------------------------------- #Add quad to screen quad = viz.addTexQuad( viz.SCREEN , pos=(0.5,0.5,0) , scale=(5,5,5) ) #quad.texture(cross) def training_display(rt,acc):
import viz
import vizact
import vizshape
import vizproximity
import random
import vizfx
import vizconnect
import viztask

# Custom event fired when the water level should rise.
WATER_RISE_EVENT = viz.getEventID('WATER_RISE_EVENT')
HEIGHT = 0          # current water height (mutated elsewhere, presumably)
obj_vis = []        # nodes whose visibility is toggled as a group

# Ambient fountain loop, kept quiet.
waterSound = viz.addAudio('fountain.wav')
waterSound.volume(0.2)

# 3D-positioned choir: the sound is attached to a group node so it is heard
# from that location rather than globally.
choirLocation = viz.addGroup(pos=[0.2, 1.8, -53])
choir_sound = choirLocation.playsound('art/scene1 choir.wav')

#scene1 = viz.addChild('art/scene1.osgb')
scene1 = vizfx.addChild('art/scene1.osgb')  # vizfx variant so effects/lighting apply
viz.MainView.getHeadLight().disable() # Disable ambient light
vizfx.setAmbientColor(viz.BLACK)
#scene1.disable(viz.LIGHTING)
scene1.hint(viz.ALLOW_NPOT_TEXTURE_HINT)
scene1.disable(0x3000) #Disable clip plane on model (0x3000 = GL_CLIP_PLANE0)

# Large flat plane used as the water surface.
waterPlane = vizshape.addPlane(size=[400, 400], pos=[0, 0.2, 0])
obj_vis.append(scene1)
def sceen_after_gamefailure():
    """Dress the city for the game-failure ending.

    Spawns smoke/fire models at fixed landmarks (buildings, car, school bus,
    park, gas station) and starts looping explosion/burning/siren audio.
    All nodes are locals, so nothing created here can be removed later.

    NOTE(review): the time.sleep() calls block the render loop for their
    duration -- presumably a deliberate stagger; a viztask schedule would be
    the non-blocking alternative.  Also note blast_sound1 is re-assigned
    below, dropping the handle to the SPEECH.MP3 loop.
    """
    #smoke at buildings
    blast_sound1 = viz.addAudio('SPEECH.MP3')
    blast_sound1.loop(viz.ON)
    blast_sound1.volume(2)   # NOTE(review): >1.0 -- confirm viz clamps this
    blast_sound1.setTime(3)  # skip the first 3 seconds of the clip
    blast_sound1.setRate(1)
    blast_sound1.play()
    smoke1 = viz.addChild('smoke.dae')
    fire_1 = viz.addChild('fire2.dae')
    fire_1.setPosition(-85, 60, 260)
    fire_1.setScale(2, 2, 2)
    smoke1.setScale(1, 1, 1)
    smoke1.setPosition(-85, 65, 250)
    time.sleep(0.1)
    blast_sound = viz.addAudio('explosion.wav')
    blast_sound.loop(viz.ON)
    blast_sound.volume(.2)
    blast_sound.setTime(3)
    blast_sound.setRate(1)
    blast_sound.play()
    smoke3 = viz.addChild('smoke.dae')
    fire_3 = viz.addChild('fire2.dae')
    fire_3.setPosition(65, 60, 260)
    fire_3.setScale(2, 2, 2)
    smoke3.setScale(1, 1, 1)
    smoke3.setPosition(65, 65, 255)
    time.sleep(0.1)
    #smoke near to car
    smoke2 = viz.addChild('smoke.dae')
    fire_2 = viz.addChild('fire2.dae')
    fire_2.setScale(0.3, 0.4, 0.3)
    fire_2.setPosition(-85, 2, 34)
    smoke2.setScale(0.1, 0.2, 0.3)
    smoke2.setPosition(-85, 1, 35)
    time.sleep(0.1)
    # NOTE(review): rebinds blast_sound1 -- the SPEECH.MP3 loop above keeps
    # playing but can no longer be stopped through this name.
    blast_sound1 = viz.addAudio('explosion.wav')
    blast_sound1.loop(viz.ON)
    blast_sound1.volume(.5)
    blast_sound1.setTime(3)
    blast_sound1.setRate(1)
    blast_sound1.play()
    #smoke at small building
    smoke4 = viz.addChild('smoke.dae')
    smoke4.setScale(0.3, 0.2, 0.3)
    smoke4.setPosition(29, 17, 110)
    fire_4 = viz.addChild('fire2.dae')
    fire_4.setScale(0.3, 0.7, 0.3)
    fire_4.setPosition(29, 15, 110)
    time.sleep(0.1)
    fire_in_schoolbus = viz.addChild('fire2.dae')
    fire_in_schoolbus.setScale(0.2, 0.15, 0.2)
    fire_in_schoolbus.setPosition(-46, 3, 49)
    #fire near to park
    smoke5 = viz.addChild('smoke.dae')
    smoke5.setScale(0.3, 0.3, 0.3)
    smoke5.setPosition(68, 0.5, 5)
    fire_5 = viz.addChild('fire2.dae')
    fire_5.setScale(0.3, 0.5, 0.3)
    fire_5.setPosition(68, 0, 5)
    time.sleep(0.1)
    blast_sound2 = viz.addAudio('explosion.wav')
    blast_sound2.loop(viz.ON)
    blast_sound2.volume(.2)
    blast_sound2.setTime(3)
    blast_sound2.setRate(1)
    blast_sound2.play()
    #fire at the gas station
    fire = viz.addChild('fire2.dae')
    fire.setScale(0.3, 0.6, 0.3)
    fire.setPosition(-46, 0, 136)
    fire1 = viz.addChild('smoke.dae')  # NOTE: named fire1 but is a smoke model
    fire1.setScale(0.3, 0.3, 0.3)
    fire1.setPosition(-46, 5, 156)
    sound1 = viz.addAudio('burning1.wav')
    sound1.loop(viz.ON)
    sound1.volume(.3)
    sound1.setTime(3)
    sound1.setRate(1)
    sound1.play()
    time.sleep(0.8)
    sound2 = viz.addAudio('firetrucks.wav')
    sound2.loop(viz.ON)
    sound2.volume(.2)
    sound2.setTime(3)
    sound2.setRate(1)
    sound2.play()
    fire3 = viz.addChild('fire2.dae')
    fire3.setScale(0.2, 0.6, 0.2)
    fire3.setPosition(-46, 10, 156)
    fire4 = viz.addChild('fire2.dae')
    fire4.setScale(0.3, 0.4, 0.2)
    fire4.setPosition(-66, 0, 136)
viz.go(viz.EMBEDDED)

def RecordHR():
    """viztask coroutine: sample the heart-rate monitor every 3 seconds.

    Appends each plausible reading (>20 bpm, filtering sensor dropouts)
    to the module-level recordedHR list until the app exits.
    """
    while not viz.done():
        yield viztask.waitTime( 3) # Record Heart Rate into Array every 3 seconds
        global recordedHR
        if (HeartRate.heartRate > 20):
            recordedHR.append(HeartRate.heartRate) # Add value to Array

# Setup Audio -- one-shot station cue, one-shot movement clip, and two
# pre-mixed crowd loops at different volumes (low/medium density).
trainAtStation = viz.addAudio('Assets/Audio/AtStation.wav')
trainAtStation.loop(viz.OFF)
trainAtStation.volume(1.0)
trainMove = viz.addAudio('Assets/Audio/TrainLoop.wav')
trainMove.loop(viz.OFF)  # NOTE(review): a "Loop" asset with loop OFF -- confirm
trainMove.volume(1.0)
trainCrowdLow = viz.addAudio('Assets/Audio/Crowds.wav')
trainCrowdLow.loop(viz.ON)
trainCrowdLow.volume(0.1)
trainCrowdMed = viz.addAudio('Assets/Audio/Crowds.wav')
trainCrowdMed.loop(viz.ON)
trainCrowdMed.volume(0.5)
Performer plays music as the crowd dances
'''
import viz
import vizshape
import vizact
from random import randint

viz.setMultiSample(4) #Enable full screen anti-aliasing (FSAA) to smooth edges
viz.go() #starts an empty world
viz.MainWindow.fov(60) #Increase the Field of View
#viz.MainView.move([0,0,7]) #X,Z,Y
viz.MainView.setPosition([-0,0,-15])  # back the viewpoint away from the stage
#viz.MainView.setEuler([0,30,0])

piazza = viz.addChild('piazza.osgb') #adds the piazza environment
# Looping background music for the performance.
sound = viz.addAudio('Panda.mp3')
sound.loop(viz.ON)
sound.play()

# Stage platform with a mic stand parented to it and a separate hand mic.
platform = viz.addChild('platform.osg')
platform.setPosition([0, .2, -11])
micStand = viz.addChild('pole.wrl',parent=platform)
micStand.setPosition([0, 0, .5])    # relative to the platform
micStand.setScale([0.5,0.36,0.5])
micStand.color(1,1,1)
mic = viz.addChild('pole.wrl')      # reuse the pole model, squashed into a mic
mic.setPosition([0, 1.6, -10.5])
mic.setScale([0.1,0.04,0.15])
mic.setEuler([0,-45,0])
mic.color(1,0,0)
import viz
import vizact
import random

# World setup: anti-aliasing, collision + physics, wide FOV.
viz.setMultiSample(4)
viz.collision(viz.ON)
viz.fov(90)
viz.go()
viz.phys.enable()

# Looping background music, with keyboard transport controls below.
mySound = viz.addAudio('Outset Island.mp3')
mySound.play()
mySound.loop( viz.ON )
#d=viz.addChild('Woola.wrl')
#d.setScale(.01,.01,.01)
#d.setPosition(2,0,2)
vizact.onkeydown( 's', mySound.stop )  # 's' stops the music
vizact.onkeydown( 'p', mySound.play )  # 'p' restarts it
#viz.fogcolor(0.5,0.5,0.5)
#viz.fog(1,10)
viz.clearcolor(viz.SKYBLUE)

# Grass ground doubling as the physics collision plane.
ground=viz.addChild('ground_grass.osgb')
ground.collidePlane()
ground.setScale([2,2,2])
viz.MainView.move([0,0,-12]) #change view
# Main-menu screen (lives in viz.Scene6): two picture buttons sharing the
# same up/down artwork, stacked vertically.
btn_newGame = viz.addButton(scene = viz.Scene6)
btn_newGame.setPosition(0.75,0.75)
btn_newGame.setScale(8,2.3)
btn_newGame.downpicture(button['portada_B'])
btn_newGame.uppicture(button['portada_A'])
# Interaction buttons
btn_credits = viz.addButton(scene = viz.Scene6)
btn_credits.setPosition(0.75,0.60)
btn_credits.setScale(8,2.3)
btn_credits.downpicture(button['portada_B'])
btn_credits.uppicture(button['portada_A'])
# NOTE(review): only btn_newGame gets a handler; btn_credits is inert here.
vizact.onbuttonup(btn_newGame, accionbuttonNewGame)
chime_sound = viz.addAudio(sound['chime'])
viz.MainView.eyeheight(4.75)
#viz.MainView.setPosition(-65,5,8)
toogleCollide(True)
#joy = vizjoy.add()
print "MenuPrincipal"
# NOTE(review): called directly here as well as via the button handler above
# -- confirm the immediate call is intentional.
accionbuttonNewGame()
#vizact.ontimer(0,UpdateJoystick)
'''
# Setup Oculus Rift HMD hmd = oculus.Rift()
def UpdatePos():
    """Slave the viewpoint to the tracked head marker every frame.

    Drops the eye height by 0.4 m and negates Z -- presumably converting the
    tracker's coordinate frame to Vizard's; confirm against the rig setup.
    """
    pos = pos_marker.getPosition()
    view.setPosition([pos[0], pos[1] - 0.4, -pos[2]], viz.ABS_GLOBAL)

# Re-run UpdatePos on every frame (interval 0 = every update).
updateView = vizact.ontimer(0, UpdatePos)

########################################################################
# In-HMD message panel, hidden until needed.
info = hmd.addMessagePanel(' ', pos=(0, 0, 3))
info.visible(viz.OFF)

# Setup a sound
metronome = viz.addAudio('Bottle_80.mp3')  # 80 bpm pacing metronome
metronome.loop(viz.ON)
metronome.volume(1)
alert = viz.addAudio('alert.mp3') # inform participants to stop walking
alert.loop(viz.OFF)
alert.volume(1)

# Set up the targets
Target1 = targetCreator(vis_tg_ht, tg1Color)
Target2 = targetCreator(vis_tg_ht, tg2Color)

def changeColor1(target=Target1):
    # NOTE: Target1 is bound as the default at definition time.
    lastColor = random.randint(0, 2)
    i = 1
viz.go()

# Simulate head tracker using keyboard/mouse navigator
head_tracker = vizcam.addWalkNavigate()
head_tracker.setPosition([0, 1.5, 0])
viz.mouse.setVisible(False)

# Add pit model
model = viz.add("pit.osgb")
model.hint(viz.OPTIMIZE_INTERSECT_HINT)

# Get handle to platform object.  State and sound effects are hung directly
# on the node: raised flag, the two stop positions, and its three sounds.
platform = model.getChild("platform")
platform.raised = False
platform.positions = [[0, 0, 0], [0, 7, 0]]  # [lowered, raised]
platform.audio_start = viz.addAudio("sounds/platform_start.wav")
platform.audio_running = viz.addAudio("sounds/platform_running.wav", loop=True)
platform.audio_stop = viz.addAudio("sounds/platform_stop.wav")

scary_voice = viz.addAudio("scary_voice.wav")
# BUG FIX: the original passed `scary_voice.play()` -- that calls play()
# once at registration time and registers its RETURN VALUE as the callback,
# so pressing '3' did nothing.  onkeydown needs the bound method itself.
vizact.onkeydown("3", scary_voice.play)

def TogglePlatform():
    """Toggle raising/lowering of platform.

    Flips platform.raised, restarts the start sound plus the running loop,
    and animates the platform to the matching stop position.
    """
    platform.raised = not platform.raised
    # bool indexes the 2-element positions list: False=0 lowered, True=1 raised
    pos = platform.positions[platform.raised]
    platform.audio_start.stop()
    platform.audio_start.play()
    platform.audio_running.play()
    platform.runAction(vizact.moveTo(pos, speed=2.0))
def UpdatePos():
    """Slave the viewpoint to the tracked head marker every frame.

    Drops the eye height by 0.2 m and negates Z -- presumably converting the
    tracker's coordinate frame to Vizard's; confirm against the rig setup.
    """
    pos = pos_marker.getPosition()
    view.setPosition([pos[0], pos[1] - 0.2, -pos[2]], viz.ABS_GLOBAL)

# Re-run UpdatePos on every frame (interval 0 = every update).
updateView = vizact.ontimer(0, UpdatePos)

########################################################################################################
# In-HMD message panel, hidden until needed.
info = hmd.addMessagePanel(' ', pos=(0, 0, 3))
info.visible(viz.OFF)

# Setup a sound
sound = viz.addAudio('Bottle_80.mp3') # The metronome.
sound.loop(viz.ON)
sound.volume(1)
alert = viz.addAudio( 'alert.mp3') # An alert to notify the participant to stop.
alert.loop(viz.OFF)
alert.volume(1)

# Set up a file to record the response to the changes of target LED colour.
# Opened in append mode for the whole session; the name encodes the subject
# id and every counterbalancing factor plus a timestamp.
# NOTE(review): `id` here must be a session variable defined upstream, not
# the builtin id() -- and the handle is never closed in this chunk.
sub_rt = open( 'VRrt_' + id + '_' + Exp_Sequence + '_' + Room_Sequence + '_' + Room_OffsetOrder + '_' + Distance_Sequence + '_' + Distance_OffsetOrder + '_' + time.strftime("%d-%b-%y_%H-%M") + '.csv', 'a')

Color = [viz.RED, viz.GREEN, viz.YELLOW]  # LED colour palette cycled by targets
def loadAudioFile(filePath):
    """Load and return the audio clip at filePath via viz.addAudio.

    Thin wrapper kept for readability at call sites; returns the viz audio
    node (viz.addAudio does not raise on a missing file -- it returns an
    invalid node, matching the original behavior).
    """
    # The original initialized a local to None and immediately overwrote it;
    # the dead assignment and temporary are removed.
    return viz.addAudio(filePath)
# Build the avatar's patrol: for each waypoint, walk there with footstep
# audio, face the painting, then wait.  The whole sequence repeats forever.
for loc in avatarMove:
    # NOTE(review): this compares by VALUE, so any waypoint equal to
    # avatarMove[2] gets the long pause -- confirm an index test wasn't meant.
    if loc == avatarMove[2]:
        RandomWait = vizact.waittime(vizact.randfloat(5, 10))
    else:
        RandomWait = vizact.waittime(vizact.randfloat(0, 0))  # zero-length wait
    #Add an action to walk to the next painting, turn towards it, and wait a few seconds
    actions.append(vizact.method.playsound('footsteps.wav', viz.LOOP))
    actions.append(vizact.walkTo([loc[0], 0, loc[1]], turnSpeed=250.0))
    actions.append(vizact.method.playsound('footsteps.wav', viz.STOP))
    actions.append(vizact.turn(loc[2], 250.0))
    actions.append(RandomWait)
avatar.addAction(vizact.sequence(actions, viz.FOREVER))

# Background music (looped) and the pit-fall sound effect.
music = viz.addAudio('bach_air.mid', loop=1)
# Add fall sound
fallSound = viz.addAudio('sounds/pit_fall.wav')

# Add blur effect for fall action
import vizfx.postprocess
from vizfx.postprocess.blur import DirectionalBlurEffect
blurEffect = DirectionalBlurEffect(samples=3, angle=90)
vizfx.postprocess.addEffect(blurEffect)

# Add red quad to flash screen after falling
flash_quad = viz.addTexQuad(parent=viz.ORTHO)
flash_quad.color(viz.RED)
flash_quad.alignment(viz.ALIGN_LEFT_BOTTOM)
# Adjust models size models['homePole'].setScale([0.6,0.45,0.6]) # the original size = [0.4 3 0.4] models['leaderPole'].setScale([0.5,0.6,0.5]) # the original size = [0.4 3 0.4] # the transparency _alpha = 0.0 # Hide loaded models models['homePole'].visible(viz.OFF) models['targetPole'].visible(viz.OFF) models['leaderPole'].visible(viz.OFF) models['orientPole'].visible(viz.OFF) # Sounds sounds={} sounds['End'] = viz.addAudio(SOUND_DIR + 'End.mp3') sounds['Begin'] = viz.addAudio(SOUND_DIR + 'Begin.mp3') # Initial data_collection, regardless of DATA_COLLECT data_collect = False # Initializes trial_stage, which is the current step of a given trial # First part is the name of the specific stage (pretrial) # Second part is practice (01) or experimental trials (02) # Third part is the stage's position in the order of all stages (01) goToStage('pretrial_00_01') # Initializa setting to start free walk trials if DO_FREEWALK == True: is_freewalk = True
models['pacePole'].visible(viz.OFF) # Turn on the lights light1 = viz.addLight() #Add an overhead light light1.setEuler(0, 90, 0) light2 = viz.addLight( ) #Next four are lights from each direction to ensure even lighting light2.setEuler(90, 0, 0) light3 = viz.addLight() light3.setEuler(0, 0, 0) light4 = viz.addLight() light4.setEuler(180, 0, 0) light5 = viz.addLight() light5.setEuler(270, 0, 0) # Sounds sounds = {} sounds['end'] = viz.addAudio( os.path.abspath(os.path.join(SOUND_DIR, 'End.mp3'))) sounds['begin'] = viz.addAudio( os.path.abspath(os.path.join(SOUND_DIR, 'Begin.mp3'))) sounds['startover'] = viz.addAudio( os.path.abspath(os.path.join(SOUND_DIR, 'Startover.mp3'))) sounds['stop'] = viz.addAudio( os.path.abspath(os.path.join(SOUND_DIR, 'Stop.wav'))) # set up IO # Dialog box asking for type of control and subject number HMD = 'Odyssey' MONITOR = 'PC Monitor' controlOptions = [HMD, MONITOR] controlType = controlOptions[viz.choose('How would you like to explore? ', controlOptions)] subject = viz.input('Please enter the subject number:', '')
def __init__(self,use_keyboard = True, desktop_mode = False):
    """Set up the shooting-range CAVE application.

    Builds the whole scene in one pass: music, lighting, the range/target/
    gun/bullet physics bodies, all gameplay timers (created disabled), the
    3D score/time labels, and the aiming reticle.  Statement order matters:
    physics must be enabled before the collide* calls, and the label
    positions below read self.target set earlier.
    """
    caveapp.CaveApplication.__init__(self,desktop_mode) #call constructor of super class, you have to do this explicitly in Python
    viz.phys.enable()
    self.view = viz.MainView;

    # --- audio: ambient loop starts now, battle music is started elsewhere
    self.backGroundMusic = viz.addAudio('Windmill hut.wav')
    self.backGroundMusic.volume(0.5)
    self.backGroundMusic.loop(viz.ON)
    self.backGroundMusic.play()
    self.gameMusic = viz.addAudio('Battle.wav')
    self.gameMusic.volume(0.7)

    # --- lighting: kill the headlight, add three fixed lights down the range
    headLight = viz.MainView.getHeadLight()
    headLight.intensity(100)
    headLight.disable()
    for i in range(3):
        light = viz.addLight()
        light.setPosition(0, 2, (i*10))
        light.position(0,0,0,1)

    self.use_keyboard = use_keyboard #store if we want to use the keyboard
    self.scaleValue = 0.03  # common scale for the FBX assets

    # --- static scenery: the range itself is a non-dynamic collision mesh
    self.shootingRange = viz.addChild('ShootingRange.FBX')
    self.shootingRange.setScale(self.scaleValue, self.scaleValue,self.scaleValue )
    self.shootingRange.name = 'shootingRange'
    self.shootingRange.collideMesh()
    self.shootingRange.disable(viz.DYNAMICS)

    # --- target: physics box that reports collisions
    self.target = viz.addChild('target.FBX')
    self.target.name = 'target'
    self.target.setScale(0.9, 0.9, 0.9)
    self.target.collideBox(density = 100)
    self.target.enable(viz.COLLIDE_NOTIFY)
    self.target.setPosition(0,0, 15)

    # --- enemy gun, facing back toward the player
    self.enemyGun = viz.addChild('Gun.FBX')
    self.enemyGun.name = 'enemyGun'
    self.enemyGun.setScale(self.scaleValue, self.scaleValue, self.scaleValue)
    self.enemyGun.setPosition(0, 1.8, 14)
    self.enemyGun.setEuler(180,0,0)

    # --- player bullet: capsule body with collision notification
    self.bullet = viz.add('Bullet.FBX')
    self.bullet.setPosition(0,1,2)
    self.bullet.setScale(self.scaleValue, self.scaleValue,self.scaleValue)
    self.bullet.name = 'bullet'
    self.bullet.collideCapsule(0.2, 0.1, density = 1, hardness = 1)
    self.bullet.enable(viz.COLLIDE_NOTIFY)
    self.nextShot = True  # gate so only one shot is in flight at a time

    # --- enemy bullet (no physics body; moved manually by its timer)
    self.enemyBullet = viz.add('Bullet.FBX')
    self.enemyBullet.setPosition(0,1,10)
    self.enemyBullet.setScale(0.05, 0.05, 0.05)
    self.enemyBullet.name = 'enemyBullet'
    self.enemyShot = False

    # --- gameplay timers, all created disabled until the game starts
    self.enemyShootTimer = vizact.ontimer(3, self.repositionEnemyGun)
    self.moveEnemyBulletTimer = vizact.ontimer(0, self.moveEnemyBullet)
    self.moveEnemyBulletTimer.setEnabled(False)
    self.enemyShootTimer.setEnabled(False)
    # NOTE(review): handler name 'schootClick' looks like a typo for
    # 'shootClick' -- it must match the method defined on this class.
    self.shootTimer = vizact.ontimer(1, self.schootClick)
    self.shootTimer.setEnabled(False)

    # Names of the target's scoring-ring sub-nodes, inner to outer values.
    self.rings = ['Ring10', 'Ring20', 'Ring30', 'Ring40', 'Ring50']

    # --- score label
    self.currentScore = 0
    self.scoreBaseText = 'Score: '
    self.firstLabelText = self.scoreBaseText + str(self.currentScore)
    self.scoreLabel = viz.addText3D(self.firstLabelText)
    self.scoreLabel.setBackdrop(viz.BACKDROP_RIGHT_BOTTOM)
    self.scoreLabel.setPosition(-1.7, 1, 0)
    self.scoreLabel.setEuler(-90,0,0)
    self.scoreLabel.color(viz.SKYBLUE)
    self.scoreLabel.setScale(0.3, 0.3, 0.3)
    self.scoreLabel.alignment(viz.ALIGN_CENTER_BOTTOM)

    # --- high-score label, just above the score
    self.currentHighScore = 0
    self.highScoreBaseText = 'Highscore: '
    self.firstHighScoreLabelText = self.highScoreBaseText + str(self.currentHighScore)
    self.highScoreLabel = viz.addText3D(self.firstHighScoreLabelText)
    self.highScoreLabel.setPosition(-1.7, 1.5, 0)
    self.highScoreLabel.setEuler(-90,0,0)
    self.highScoreLabel.color(viz.BLUE)
    self.highScoreLabel.setScale(0.3, 0.3, 0.3)
    self.highScoreLabel.alignment(viz.ALIGN_CENTER_BOTTOM)

    # --- transient popup labels floating above the target
    self.newPointLabel = viz.addText3D('')
    self.newPointLabel.color(viz.GREEN)
    self.newPointLabel.setPosition(self.target.getPosition()[0], self.target.getPosition()[1] + 2, self.target.getPosition()[2])
    self.newPointLabel.alignment(viz.ALIGN_CENTER_BOTTOM)
    self.maxTimeNewPointVisible = 1  # seconds the popup stays visible
    self.newHitLabel = viz.addText3D('')
    self.newHitLabel.color(viz.RED)
    self.newHitLabel.setPosition(self.target.getPosition()[0], self.target.getPosition()[1] + 2.5, self.target.getPosition()[2])
    self.newHitLabel.alignment(viz.ALIGN_CENTER_BOTTOM)
    self.maxTimeHitVisible = 1
    self.newPointTimer = vizact.ontimer(self.maxTimeNewPointVisible, self.makeNewPointLabelInvisible)
    self.newPointTimer.setEnabled(False)
    self.newHitPointTimer = vizact.ontimer(self.maxTimeHitVisible, self.makeNewHitLabelInvisible)
    self.newHitPointTimer.setEnabled(False)

    # --- feedback sounds
    self.goodSound = viz.addAudio('good.mp3')
    self.coinSound = viz.addAudio('coin.wav')
    self.hitSound = viz.addAudio('hit.mp3')

    # --- round clock: 35-second rounds, label hidden until play starts
    self.playTimer = vizact.ontimer(1, self.timerClick)
    self.playTimer.setEnabled(False)
    self.playTime = 35
    self.currentTime = 0
    self.isPlaying = False
    self.timeBaseText = 'Time: '
    self.timeLabel = viz.addText3D(self.timeBaseText)
    self.timeLabel.setPosition(1.1, 1.3, 3)
    self.timeLabel.setEuler(50,0,0)
    self.timeLabel.setScale(0.3, 0.3, 0.3)
    self.timeLabel.alignment(viz.ALIGN_CENTER_BOTTOM)
    self.timeLabel.visible(False)

    # --- aiming reticle: scope model plus three red crosshair line segments
    self.scope = viz.addChild('Scope.FBX')
    viz.startLayer(viz.LINES)
    viz.vertexColor(viz.RED)
    viz.vertex(-0.2, 0,0)
    viz.vertex(0.2, 0, 0)
    self.topLine = viz.endLayer()
    viz.startLayer(viz.LINES)
    viz.vertexColor(viz.RED)
    viz.vertex(0, 0,0)
    viz.vertex(0, -0.3, 0)
    self.leftLine = viz.endLayer()
    # NOTE(review): rightLine uses the same vertices as leftLine -- presumably
    # the pair is positioned/offset later; confirm this is intentional.
    viz.startLayer(viz.LINES)
    viz.vertexColor(viz.RED)
    viz.vertex(0, 0,0)
    viz.vertex(0, -0.3, 0)
    self.rightLine = viz.endLayer()
    self.time = 0.0
# Build the avatar's patrol: for each waypoint, walk there with footstep
# audio, face the painting, then wait.  The whole sequence repeats forever.
for loc in avatarMove:
    # NOTE(review): this compares by VALUE, so any waypoint equal to
    # avatarMove[2] gets the long pause -- confirm an index test wasn't meant.
    if loc == avatarMove[2]:
        RandomWait = vizact.waittime(vizact.randfloat(5,10))
    else:
        RandomWait = vizact.waittime(vizact.randfloat(0,0))  # zero-length wait
    #Add an action to walk to the next painting, turn towards it, and wait a few seconds
    actions.append(vizact.method.playsound('footsteps.wav',viz.LOOP))
    actions.append(vizact.walkTo([loc[0],0,loc[1]],turnSpeed=250.0))
    actions.append(vizact.method.playsound('footsteps.wav',viz.STOP))
    actions.append(vizact.turn(loc[2],250.0))
    actions.append(RandomWait)
avatar.addAction(vizact.sequence(actions,viz.FOREVER))

# Background music (looped) and the pit-fall sound effect.
music = viz.addAudio('bach_air.mid',loop=1)
# Add fall sound
fallSound = viz.addAudio('sounds/pit_fall.wav')

# Add blur effect for fall action
import vizfx.postprocess
from vizfx.postprocess.blur import DirectionalBlurEffect
blurEffect = DirectionalBlurEffect(samples=3,angle=90)
vizfx.postprocess.addEffect(blurEffect)

# Add red quad to flash screen after falling
flash_quad = viz.addTexQuad(parent=viz.ORTHO)
flash_quad.color(viz.RED)
pass  # tail of the preceding method, whose def lies above this chunk

def save(self, filename=None):
    """Persist the current volume of every managed sound as JSON.

    filename defaults to 'sound_settings.json'.  Writes a flat
    {name: volume} mapping built from self._soundDict.
    """
    if filename is None:
        filename = 'sound_settings.json'
    volumeDict = {}
    for name, sound in self._soundDict.iteritems():
        volumeDict[name] = sound.getVolume()
    # NOTE: 'file' shadows the Python 2 builtin; harmless in this scope.
    with open(filename, 'w') as file:
        file.write(json.dumps(volumeDict))

if __name__ == "__main__":
    # Demo: two looping ambient sounds registered with a SoundManager.
    sound = viz.addAudio('conversation.wav')
    sound.loop(True)
    sound.play()
    soundFountain = viz.addAudio('fountain.wav')
    soundFountain.loop(True)
    soundFountain.play()
    # window = vizconfig.getConfigWindow('sound manager')
    # window.setWindowVisible(True)
    manager = SoundManager()
    manager.add(sound, 'conversation')
    manager.add(soundFountain, 'fountain')
    viz.go()
    viz.add('piazza.osgb')
# Geometry parameters for the grabbable sphere clusters (units: meters).
size_sphere_around = 8.0
radius_sphere = 0.08
larger_ball_main_r = 2.0
larger_ball_around = 2.0
larger_ball_sphere2 = 0.70
tranparent = 0.4  # NOTE: name is a typo for 'transparent' kept for callers

#grabber = vizconnect.getRawTool('grabber')
#music= viz.addAudio('art/beijing.wav')
#music_scene2_keep=viz.addAudio('art/Lana Del Rey - Dark Paradise.wav')
# Scene-2 soundtrack pair: the active ghost track and a held horror track.
music = viz.addAudio('art/scene2-ghost.wav')
music_scene2_keep = viz.addAudio('art/scene2 horror.wav')

# One near-invisible main sphere per cluster center; the last one is larger.
# Depends on center_num / center_pos / size_main_sphere defined upstream.
grabObjects = []
for iii in range(center_num):
    tmp_main_radius = size_main_sphere
    if iii == center_num - 1:
        tmp_main_radius = larger_ball_main_r  # final cluster gets the big sphere
    mainSphere = vizshape.addSphere(radius=tmp_main_radius,
                                    pos=center_pos[iii], color=viz.RED)
    mainSphere.alpha(0.01)  # effectively invisible but still grabbable
    grabObjects.append(mainSphere)
# Scatter three potted plants along the row at x = -110 .. -52.
plant6 = viz.addChild('plant.osgb')
plant6.setScale(3, 3, 3)
plant6.setPosition(-110, 0, -35)
plant5 = viz.addChild('plant.osgb')
plant5.setScale(3, 3, 3)
plant5.setPosition(-90, 0, -30)
plant7 = viz.addChild('plant.osgb')
plant7.setScale(3, 3, 3)
plant7.setPosition(-52, 0, -30)

#######################sound############################
#Adding sound -- radio track, skipped 3 s in, at normal rate
sound = viz.addAudio('radio2.mp3')
#sound.loop(viz.ON)
sound.volume(2)  # NOTE(review): >1.0 -- confirm viz clamps this
sound.setTime(3)
sound.setRate(1)
sound.play()
########################################################

# Lip-synced speech action plus four run-to actions converging on one spot.
speech = vizact.speak('jfk.wav')
run11 = vizact.walkTo([-45, 2.40, 30.5], verb='run')
run21 = vizact.walkTo([-45, 2.40, 30.5], verb='run')
run31 = vizact.walkTo([-45, 2.40, 30.5], verb='run')
run41 = vizact.walkTo([-42, 2.40, 30.5], verb='run')
import viz
import vizact
import vizproximity
import vizfx
import vizact  # NOTE(review): duplicate import, harmless

# Scene 3 environment and its music.
scene3 = vizfx.addChild('art/scene3.osgb')
scene3.hint(viz.OPTIMIZE_INTERSECT_HINT)
#viz.clearcolor(viz.SLATE)
obj_vis = []  # nodes whose visibility is toggled as a group
obj_vis.append(scene3)
scene3_music = viz.addAudio('art/scene3 paradise.wav')
scene3_music.volume(1)

# Get handle to starting_box object; its platform sounds are hung on the node.
starting_box = scene3.getChild('Starting Box-GEODE')
starting_box_height = 6.91645
starting_box.audio_start = viz.addAudio('sounds/platform_start.wav')
starting_box.audio_running = viz.addAudio('sounds/platform_running.wav',loop=True)
starting_box.audio_stop = viz.addAudio('sounds/platform_stop.wav')

# Resting heights of the nine wall pairs, 10 m apart.
wallHeights = [4.91645, 14.91645, 24.91645, 34.91645, 44.91645, 54.91645,
               64.91645, 74.91645, 84.91645]
leftWalls = []
rightWalls = []

def lowerWalls():
    # Look up each Left<i>/Right<i> wall geode by name (continues past chunk).
    for i in range(1,10):
        leftWallName = 'Left' + str(i) +'-GEODE'
        rightWallName = 'Right' + str(i) + '-GEODE'
""" import viz import vizact import vizproximity import viztask import vizinfo viz.setMultiSample(4) viz.fov(60) viz.go() # Add info panel to display messages to participant instructions = vizinfo.InfoPanel(icon=False, key=None) # Add ambient sound piazzaSound = viz.addAudio("piazza.mp3") piazzaSound.play() piazzaSound.loop() piazza = viz.add("piazza.osgb") # Move the viewpoint to the starting location viz.MainView.move([10.5, 0, 20.5]) viz.MainView.setEuler([-90, 0, 0]) # Add male and female avatars in conversation male = viz.addAvatar("vcc_male.cfg", pos=[-2.6, 0, 10.4], euler=[-40, 0, 0]) female = viz.addAvatar("vcc_female.cfg", pos=[-3.4, 0, 11.2], euler=[140, 0, 0]) male.state(14) female.state(14)