def update_crop(self):
    """Center the "cropXY" crop filter on the current cursor position.

    Setup: create 2 display captures; add a crop filter named cropXY to
    this source, check "relative", and set Width/Height to relatively
    small numbers, e.g. 64x64.  Image mask blend + color correction
    might be an option too.  Run the script, select this source as the
    cursor source, check "Update crop", click start.
    """
    source = obs.obs_get_source_by_name(self.source_name)
    crop = obs.obs_source_get_filter_by_name(source, "cropXY")
    filter_settings = obs.obs_source_get_settings(crop)
    _x, _y = get_position()
    # cx/cy are the crop output width/height, see:
    # https://github.com/obsproject/obs-studio/blob/79981889c6d87d6e371e9dc8fcaad36f06eb9c9e/plugins/obs-filters/crop-filter.c#L87-L93
    w = obs.obs_data_get_int(filter_settings, "cx")
    h = obs.obs_data_get_int(filter_settings, "cy")
    half_w, half_h = w // 2, h // 2
    # Offset by half the crop size so the cursor sits in the middle.
    # "left" is horizontal so it pairs with the width (cx); "top" is
    # vertical and pairs with the height (cy).  (The original subtracted
    # half the *height* from x and half the *width* from y — harmless
    # for square crops like 64x64, wrong for anything else.)
    obs.obs_data_set_int(filter_settings, "left", _x - half_w)
    obs.obs_data_set_int(filter_settings, "top", _y - half_h)
    obs.obs_source_update(crop, filter_settings)
    obs.obs_data_release(filter_settings)
    obs.obs_source_release(source)
    obs.obs_source_release(crop)
def durUpdate(self, *args):
    """Keep the duration entry widget and the "duration" setting in sync.

    A valid integer in the widget is written into the settings;
    otherwise the widget text is restored from the stored setting.
    """
    global globSettings
    entered = self.duration.get()
    if entered.isdigit():
        obs.obs_data_set_int(globSettings, "duration", int(entered))
    else:
        stored = obs.obs_data_get_int(globSettings, "duration")
        self.duration.set(str(stored))
def script_load(settings):
    """Load the persisted format and timer duration into Data.

    Seeds the default format back into the settings when nothing was
    stored yet, and writes the timer duration back unchanged.
    """
    Data._format_ = obs.obs_data_get_string(settings, 'format')
    Data._timerDuration_ = obs.obs_data_get_int(settings, 'timer_duration')
    # An empty/missing format means first run: persist the default.
    if not Data._format_:
        Data._format_ = Data._defaultFormat_
        obs.obs_data_set_string(settings, 'format', Data._format_)
    obs.obs_data_set_int(settings, 'timer_duration', Data._timerDuration_)
def update_text(self, scripted_text, color=None):
    """Write scripted_text (optionally with a color) into the OBS source."""
    with source_ar(self.source_name) as source, data_ar() as settings:
        self.text_string = scripted_text
        if color:
            obs.obs_data_set_int(settings, "color", color)  # colored text
        obs.obs_data_set_string(settings, "text", self.text_string)
        obs.obs_source_update(source, settings)
def script_load(settings):
    """Load the persisted format and inter-message delay into Data.

    Seeds the default format back into the settings when nothing was
    stored yet, and writes the delay back unchanged.
    """
    Data._format_ = obs.obs_data_get_string(settings, 'format')
    Data._timeBetweenMessages_ = obs.obs_data_get_int(settings, 'seconds_between_lines')
    # An empty/missing format means first run: persist the default.
    if not Data._format_:
        Data._format_ = Data._defaultFormat_
        obs.obs_data_set_string(settings, 'format', Data._format_)
    obs.obs_data_set_int(settings, 'seconds_between_lines', Data._timeBetweenMessages_)
def update_text(self, scripted_text, color=None):
    """Takes scripted_text and sets its value in OBS, optionally colored."""
    self.text_string = scripted_text
    source = obs.obs_get_source_by_name(self.source_name)
    data = obs.obs_data_create()
    if color:
        obs.obs_data_set_int(data, "color", color)  # colored text
    obs.obs_data_set_string(data, "text", self.text_string)
    obs.obs_source_update(source, data)
    obs.obs_data_release(data)
    obs.obs_source_release(source)
def add_filter_to_source(self):
    """Attach a private 50%-opacity color filter to the "1test_py" source."""
    target = S.obs_get_source_by_name("1test_py")
    opacity_settings = S.obs_data_create()
    S.obs_data_set_int(opacity_settings, "opacity", 50)
    color_filter = S.obs_source_create_private(
        "color_filter", "opacity to 50", opacity_settings)
    S.obs_source_filter_add(target, color_filter)
    S.obs_source_release(target)
    S.obs_data_release(opacity_settings)
    S.obs_source_release(color_filter)
def SetDestinationPositionAndSize(props, p):
    """Capture the current scene item's position/size as an animation's
    destination and persist it into the module-level settings object.

    The animation index is parsed out of the property name because OBS
    property callbacks don't carry per-property user data well.
    """
    global Animations
    # Base the index off of the name since callbacks don't work well.
    name = obs.obs_property_name(p)
    indexStr = re.sub("[^0-9]", "", name)
    animationIndex = int(indexStr)
    scene_item = getSceneItem()
    posV = obs.vec2()
    scaleV = obs.vec2()
    obs.obs_sceneitem_get_pos(scene_item, posV)
    obs.obs_sceneitem_get_scale(scene_item, scaleV)
    width, height = calculateSize(scene_item, scaleV.x, scaleV.y)
    # Hoist the repeated Animations[animationIndex] lookup.
    anim = Animations[animationIndex]
    anim.destinationX = posV.x
    anim.destinationY = posV.y
    anim.destinationWidth = width
    anim.destinationHeight = height
    # int(x) replaces the original C-style "(int)(x)" casts.
    # NOTE(review): `settings` is assumed to be a module-level obs_data
    # handle set before this callback can fire — confirm.
    obs.obs_data_set_int(settings, anim.destinationXStorage, int(anim.destinationX))
    obs.obs_data_set_int(settings, anim.destinationYStorage, int(anim.destinationY))
    obs.obs_data_set_int(settings, anim.destinationWidthStorage, int(anim.destinationWidth))
    obs.obs_data_set_int(settings, anim.destinationHeightStorage, int(anim.destinationHeight))
def setSourceOpacityByName(self, sourceName, opacity):
    """Set the opacity of the named source, if it has the opacity filter.

    Silently does nothing when the source or its OPACITY_FILTER_NAME
    filter is missing.
    """
    with getSourceByName(sourceName) as source:
        if source is None:
            return
        # Renamed from `filter` to avoid shadowing the builtin.
        with sourceGetFilterByName(source, OPACITY_FILTER_NAME) as opacity_filter:
            if opacity_filter is None:
                return
            with createObsData() as settings:
                obs.obs_data_set_int(settings, 'opacity', int(opacity))
                obs.obs_source_update(opacity_filter, settings)
def set_filter_value(source_name, filter_name, filter_field_name, value):
    """Set one integer field on a named filter of a named source.

    No-op when the source or filter cannot be found.  Every OBS handle
    acquired here is released before returning.
    """
    source = obs.obs_get_source_by_name(source_name)
    if source is not None:
        # Renamed from `filter` to avoid shadowing the builtin.
        target_filter = obs.obs_source_get_filter_by_name(source, filter_name)
        if target_filter is not None:
            # Get the settings data object for the filter.
            filter_settings = obs.obs_source_get_settings(target_filter)
            # Update the field and push the new settings to the filter.
            obs.obs_data_set_int(filter_settings, filter_field_name, value)
            obs.obs_source_update(target_filter, filter_settings)
            # Release the resources.
            obs.obs_data_release(filter_settings)
            obs.obs_source_release(target_filter)
        obs.obs_source_release(source)
def set_sources():  # run only at loading and in thread t
    """Prime the two slide text sources and their opacity filters.

    Writes the new slide text into source 1 and the previous text into
    source 2, then sets the COLOR_FILTER_NAME opacity values so source 1
    starts fully transparent and source 2 keeps the previous opacity —
    presumably so a later transition can cross-fade them; confirm
    against the companion transition() in this file.
    """
    global update_time
    global slideText
    global last_slideText
    global source_1_name
    global source_2_name
    global transparency1
    global transparency2
    update_time = time.time()
    # Carry the old opacity over to source 2; new text starts invisible.
    transparency2 = transparency1
    transparency1 = 0
    source1 = obs.obs_get_source_by_name(source_1_name)
    source2 = obs.obs_get_source_by_name(source_2_name)
    filter1 = obs.obs_source_get_filter_by_name(source1, COLOR_FILTER_NAME)
    filter2 = obs.obs_source_get_filter_by_name(source2, COLOR_FILTER_NAME)
    source1Settings = obs.obs_data_create()
    source2Settings = obs.obs_data_create()
    filter1Settings = obs.obs_data_create()
    filter2Settings = obs.obs_data_create()
    if source1 is not None:
        obs.obs_data_set_string(source1Settings, "text", slideText)
        if source2 is not None:
            obs.obs_data_set_string(source2Settings, "text", last_slideText)
            obs.obs_data_set_int(filter1Settings, "opacity", transparency1)
            obs.obs_data_set_int(filter2Settings, "opacity", transparency2)
        else:
            # Only the first source exists: show it at full opacity.
            obs.obs_data_set_int(filter1Settings, "opacity", 100)
    elif source2 is not None:
        # Only the second source exists: show it, hide the first filter.
        obs.obs_data_set_string(source2Settings, "text", last_slideText)
        obs.obs_data_set_int(filter1Settings, "opacity", 0)
        obs.obs_data_set_int(filter2Settings, "opacity", 100)
    # NOTE(review): these run unconditionally even when a lookup above
    # returned None — the obs_* calls are assumed null-safe; confirm.
    obs.obs_source_update(source1, source1Settings)
    obs.obs_source_update(source2, source2Settings)
    obs.obs_source_update(filter1, filter1Settings)
    obs.obs_source_update(filter2, filter2Settings)
    obs.obs_data_release(source1Settings)
    obs.obs_data_release(source2Settings)
    obs.obs_data_release(filter1Settings)
    obs.obs_data_release(filter2Settings)
    obs.obs_source_release(source1)
    obs.obs_source_release(source2)
    obs.obs_source_release(filter1)
    obs.obs_source_release(filter2)
def hue_effect(self):
    """Apply random hue; add a second color to see the effect."""
    self.update_text(self._scripted_text)
    with source_ar(self.source_name) as source, filter_ar(source, "py_hue") as hue:
        # Create the hue filter the first time through.
        if hue is None:
            with data_ar() as settings:
                with p_source_ar("color_filter", "py_hue", settings) as _source:
                    obs.obs_source_filter_add(source, _source)
        with data_ar(hue) as filter_settings:
            seed()
            shift = randrange(-180, 180)
            obs.obs_data_set_int(filter_settings, "hue_shift", shift)
            obs.obs_source_update(hue, filter_settings)
        # Tear the filter down once only a few refresh ticks remain.
        if self.duration // self.refresh_rate <= 3:
            obs.obs_source_filter_remove(source, hue)
            self.duration = 0
def set_source_text(source_name: str, text: str, color: int):
    """Set the text (and optionally the color) of a text source.

    Handles text_gdiplus ("color" key) and other text sources such as
    freetype2 ("color1" key with an 0xff alpha prefix).
    """
    if not source_name:
        return
    # copied and modified from scripted-text.py by UpgradeQ
    with source_ar(source_name) as source, data_ar() as settings:
        obs.obs_data_set_string(settings, "text", text)
        source_id = obs.obs_source_get_unversioned_id(source)
        if color is not None:
            if source_id == "text_gdiplus":
                obs.obs_data_set_int(settings, "color", color)  # colored text
            else:
                # freetype2 is BGR, should be reversed for getting correct color
                # NOTE(review): no byte reversal is actually performed —
                # only an 0xff alpha prefix is prepended (the original's
                # "".join over the hex string was a no-op); confirm intent.
                packed = int("0xff" f"{hex(color)[2:]}", base=16)
                obs.obs_data_set_int(settings, "color1", packed)
        obs.obs_source_update(source, settings)
def update_color(self):
    """Drive the "py_color" filter's opacity from the current input value.

    The opacity written is 50 - value, so self.value is presumably a
    small signed delta (e.g. a scroll step) — confirm against the caller
    that sets self.value.
    """
    source = obs.obs_get_source_by_name(self.source_name)
    scroll = obs.obs_source_get_filter_by_name(source, "py_color")
    filter_settings = obs.obs_source_get_settings(scroll)
    value = self.value
    # Remember the last non-zero value so a zero reading reuses it.
    if value:
        self.last_value = value
    if value == 0:
        value = self.last_value
    value = 50 - value
    obs.obs_data_set_int(filter_settings, "opacity", value)
    obs.obs_source_update(scroll, filter_settings)
    obs.obs_data_release(filter_settings)
    obs.obs_source_release(source)
    obs.obs_source_release(scroll)
def sanic_effect(self):
    """Really fast speed text scrolling (scroll filter)."""
    self.update_text(self._scripted_text)
    with source_ar(self.source_name) as source, filter_ar(source, "py_scroll") as scroll:
        # Add the scroll filter to the source if not present.
        if scroll is None:
            with data_ar() as settings:
                obs.obs_data_set_int(settings, "speed_x", 5000)
                with p_source_ar("scroll_filter", "py_scroll", settings) as _source:
                    obs.obs_source_filter_add(source, _source)
        with data_ar(scroll) as filter_settings:
            obs.obs_data_set_int(filter_settings, "speed_x", 5000)
            obs.obs_source_update(scroll, filter_settings)
        # Remove the filter once only a few refresh ticks remain.
        if self.duration // self.refresh_rate <= 3:
            obs.obs_source_filter_remove(source, scroll)
            self.duration = 0
def aseta(props, p):
    """Push stored per-suffix values into every matching source.

    For each configured suffix in `liitteet`, sources whose name ends
    with that suffix are updated from the persisted settings: "N"
    suffixes carry text, "V" suffixes carry a color stored as an int
    string.
    """
    global liitteet
    global settings
    sources = obs.obs_enum_sources()
    for liite in liitteet:
        for source in sources:
            nimi = obs.obs_source_get_name(source)
            if nimi.endswith(liite):
                data = obs.obs_data_create()
                if "N" in liite:
                    tieto = obs.obs_data_get_string(settings, liite)
                    obs.obs_data_set_string(data, "text", tieto)
                elif "V" in liite:
                    tieto = obs.obs_data_get_string(settings, liite)
                    obs.obs_data_set_int(data, "color", int(tieto))
                obs.obs_source_update(source, data)
                obs.obs_data_release(data)
    # Release the enumerated list exactly once.  The original released
    # each matching source inside the loop, which double-releases a
    # source matching several suffixes and leaks non-matching ones.
    obs.source_list_release(sources)
def sanic_effect(self):
    """Scroll the text extremely fast via a scroll filter."""
    # Add the scroll filter to the source if not present.
    self.update_text(self.scripted_text)
    with source_ar(self.source_name) as source, filter_ar(
            source, "py_scroll") as scroll:
        if scroll is None:
            with data_ar() as settings:
                obs.obs_data_set_int(settings, "speed_x", 5000)
                private_scroll = obs.obs_source_create_private(
                    "scroll_filter", "py_scroll", settings)
                obs.obs_source_filter_add(source, private_scroll)
                obs.obs_source_release(private_scroll)
        with data_ar(scroll) as filter_settings:
            obs.obs_data_set_int(filter_settings, "speed_x", 5000)
            obs.obs_source_update(scroll, filter_settings)
        # Remove the filter once only a few refresh ticks remain.
        if self.duration // self.refresh_rate <= 3:
            obs.obs_source_filter_remove(source, scroll)
            self.duration = 0
def reset_match_info(prop=None, props=None):
    """Reset the stored match identification fields to their defaults."""
    obs.obs_data_set_string(settings, 'match_type', 'qualification')
    obs.obs_data_set_int(settings, 'match_pair', 1)
    obs.obs_data_set_int(settings, 'match_number', 1)
    obs.obs_data_set_int(settings, 'match_code', 1)
    # Plain string literal — the original used an f-string with no
    # placeholders.
    print('Match info reset')
    print()
def fade_effect(self):
    """Fade the text via an opacity (color correction) filter."""
    self.update_text(self._scripted_text)
    with source_ar(self.source_name) as source, filter_ar(
            source, "py_fade") as fade:
        # Create the fade filter the first time through.
        if fade is None:
            with data_ar() as settings:
                with p_source_ar("color_filter", "py_fade", settings) as _source:
                    obs.obs_source_filter_add(source, _source)
        with data_ar(fade) as filter_settings:
            try:
                ratio = self.effect_duration / self.duration
                percent = 100 / ratio
            except ZeroDivisionError:
                # Either duration hit zero: treat as fully faded out.
                percent = 0
            obs.obs_data_set_int(filter_settings, "opacity", int(percent))
            obs.obs_source_update(fade, filter_settings)
        # Remove the filter once only a few refresh ticks remain.
        if self.duration // self.refresh_rate <= 3:
            obs.obs_source_filter_remove(source, fade)
            self.duration = 0
def set_color(self, color, settings):
    """Store `color` into `settings` for this source's text type.

    GDI+ takes a single "color" key; other (freetype2-style) sources
    take "color1"/"color2" with an 0xff alpha prefix, reversing the hex
    digits first for colors that came from user input.
    """
    if self._obs_source_type == "text_gdiplus":
        obs.obs_data_set_int(settings, "color", color)  # colored text
        return
    # freetype2: if taken from user input it should be reversed for
    # getting the correct color.
    digits = hex(color)[2:]
    if color not in self.default_palette:
        digits = digits[::-1]
    packed = int("0xff" + digits, base=16)
    obs.obs_data_set_int(settings, "color1", packed)
    obs.obs_data_set_int(settings, "color2", packed)
def transition():
    """Advance the text cross-fade one step.

    Linearly interpolates transparency1 up (and transparency2 down)
    from the wall-clock time elapsed since update_time, then writes the
    values straight onto the two text sources as "opacity" and
    "outline_opacity".
    """
    global update_time
    global source_1_name
    global source_2_name
    global transparency1
    global transparency2
    global transition_time
    with thread_lock:
        if transparency1 < 100:
            # Linear interpolation over transition_time seconds.
            time_since_last_update = time.time() - update_time
            lerp = int(time_since_last_update * 100 / transition_time)
            transparency1 = lerp
            if transparency1 >= 100:
                transparency1 = 100
            transparency2 = 100 - lerp
            if transparency2 <= 0:
                transparency2 = 0
                # obs.timer_remove(transition)
            source1 = obs.obs_get_source_by_name(source_1_name)
            source2 = obs.obs_get_source_by_name(source_2_name)
            if source1 is not None and source2 is not None:
                settings1 = obs.obs_data_create()
                settings2 = obs.obs_data_create()
                obs.obs_data_set_int(settings1, "opacity", transparency1)
                obs.obs_data_set_int(settings2, "opacity", transparency2)
                obs.obs_data_set_int(settings1, "outline_opacity", transparency1)
                obs.obs_data_set_int(settings2, "outline_opacity", transparency2)
                obs.obs_source_update(source1, settings1)
                obs.obs_source_update(source2, settings2)
                obs.obs_data_release(settings1)
                obs.obs_data_release(settings2)
            # NOTE(review): release is assumed null-safe when a lookup
            # failed — confirm against obspython behavior.
            obs.obs_source_release(source1)
            obs.obs_source_release(source2)
def transition():
    """Advance the slide cross-fade one step via opacity filters.

    Interpolates transparency1 up (and transparency2 down) from the
    wall-clock time elapsed since update_time, writes the values into
    each source's COLOR_FILTER_NAME filter, and fades the background
    filter in/out only when the slide text is appearing from, or
    disappearing to, an empty string.
    """
    global update_time
    global source_1_name
    global source_2_name
    global slideText
    global last_slideText
    global background_name
    global transparency1
    global transparency2
    global transition_time
    with thread_lock:
        if transparency1 < 100:
            time_since_last_update = time.time() - update_time
            if time_since_last_update > update_time:  # prevent overflow
                # BUGFIX: the original wrote `==` here — a no-op
                # comparison; assignment was clearly intended.
                # NOTE(review): clamping against update_time (an absolute
                # timestamp) looks odd; transition_time may have been
                # meant — confirm before changing further.
                time_since_last_update = update_time
            lerp = int(time_since_last_update * 100 / transition_time)
            transparency1 = lerp
            if transparency1 >= 100:
                transparency1 = 100
            transparency2 = 100 - lerp
            if transparency2 <= 0:
                transparency2 = 0
            # Update sources
            source1 = obs.obs_get_source_by_name(source_1_name)
            source2 = obs.obs_get_source_by_name(source_2_name)
            if source1 is not None and source2 is not None:
                filter1 = obs.obs_source_get_filter_by_name(
                    source1, COLOR_FILTER_NAME)
                filter2 = obs.obs_source_get_filter_by_name(
                    source2, COLOR_FILTER_NAME)
                settings1 = obs.obs_data_create()
                settings2 = obs.obs_data_create()
                obs.obs_data_set_int(settings1, "opacity", transparency1)
                obs.obs_data_set_int(settings2, "opacity", transparency2)
                obs.obs_source_update(filter1, settings1)
                obs.obs_source_update(filter2, settings2)
                obs.obs_data_release(settings1)
                obs.obs_data_release(settings2)
                obs.obs_source_release(filter1)
                obs.obs_source_release(filter2)
            # Update background
            background = obs.obs_get_source_by_name(background_name)
            if background is not None:
                filterb = obs.obs_source_get_filter_by_name(
                    background, COLOR_FILTER_NAME)
                settingsb = obs.obs_data_create()
                if source1 is not None and source2 is not None:
                    if last_slideText == "" and slideText != "":
                        # Text appearing: fade the background in with it.
                        obs.obs_data_set_int(settingsb, "opacity", transparency1)
                        obs.obs_source_update(filterb, settingsb)
                    elif last_slideText != "" and slideText == "":
                        # Text disappearing: fade the background out.
                        obs.obs_data_set_int(settingsb, "opacity", transparency2)
                        obs.obs_source_update(filterb, settingsb)
                else:
                    # No sources to fade against: snap the background.
                    if last_slideText == "" and slideText != "":
                        obs.obs_data_set_int(settingsb, "opacity", 100)
                        obs.obs_source_update(filterb, settingsb)
                    elif last_slideText != "" and slideText == "":
                        obs.obs_data_set_int(settingsb, "opacity", 0)
                        obs.obs_source_update(filterb, settingsb)
                obs.obs_data_release(settingsb)
                obs.obs_source_release(filterb)
            obs.obs_source_release(source1)
            obs.obs_source_release(source2)
            obs.obs_source_release(background)
def start_recording(pressed=False):
    """Start the match recording output and identify the active match.

    Records to a fresh temp .mkv, then (when a scorekeeper API and
    event code are configured) queries the scorekeeper for the active
    match and stores its type/pair/number/code in `settings`.
    """
    # Hotkey callbacks fire on press and release; act on release only.
    if pressed:
        return
    if obs.obs_output_active(output):
        print(f'WARNING: Currently recording {get_match_name()}')
        print()
        return
    # Record to a unique temp file; the fd is intentionally left to the
    # output to write into (NOTE(review): match_fd is never closed here
    # — confirm the output takes over the path, not the descriptor).
    match_fd, match_path = tempfile.mkstemp(suffix='.mkv')
    output_settings = obs.obs_data_create()
    obs.obs_data_set_string(output_settings, 'path', f'{match_path}')
    obs.obs_output_update(output, output_settings)
    obs.obs_data_release(output_settings)
    if not obs.obs_output_start(output):
        print(
            f'ERROR: Could not start match recording: {obs.obs_output_get_last_error(output) or "Unknown error"}'
        )
        print()
        return
    # Best-effort: ask the scorekeeper which match is active.
    if obs.obs_data_get_string(
            settings, 'scorekeeper_api') and obs.obs_data_get_string(
                settings, 'event_code'):
        try:
            with urllib.request.urlopen(
                    f'{obs.obs_data_get_string(settings, "scorekeeper_api")}/v1/events/{obs.obs_data_get_string(settings, "event_code")}/matches/active/'
            ) as matches:
                match_data = json.load(matches)['matches']
                if len(match_data) > 0:
                    match_name = match_data[-1]['matchName']
                    match_code = match_data[-1]['matchNumber']
                    # Match names look like "Q12", "SF1-2", "F1".
                    if match_name[0] == 'Q':
                        obs.obs_data_set_string(settings, 'match_type',
                                                'qualification')
                        obs.obs_data_set_int(settings, 'match_pair', 1)
                        obs.obs_data_set_int(settings, 'match_number',
                                             int(match_name[1:]))
                    elif match_name[0:2] == 'SF' or match_name[0] == 'F':
                        # Elimination codes continue after all played elims.
                        with urllib.request.urlopen(
                                f'{obs.obs_data_get_string(settings, "scorekeeper_api")}/v1/events/{obs.obs_data_get_string(settings, "event_code")}/elim/all/'
                        ) as elims:
                            match_code = len(json.load(elims)['matchList']) + 1
                        if match_name[0] == 'F':
                            obs.obs_data_set_string(settings, 'match_type',
                                                    'final')
                            obs.obs_data_set_int(settings, 'match_pair', 1)
                            obs.obs_data_set_int(settings, 'match_number',
                                                 int(match_name[2:]))
                        else:
                            obs.obs_data_set_string(settings, 'match_type',
                                                    'semi-final')
                            obs.obs_data_set_int(settings, 'match_pair',
                                                 int(match_name[2]))
                            obs.obs_data_set_int(settings, 'match_number',
                                                 int(match_name[4:]))
                    else:
                        print(
                            f'WARNING: Recording unknown match type "{match_name}"'
                        )
                        obs.obs_data_set_int(settings, 'match_number',
                                             match_code)
                    obs.obs_data_set_int(settings, 'match_code', match_code)
        except Exception:
            # Best-effort only: recording continues with stored match info.
            print(f'WARNING: Failed to communicate with scorekeeper')
    print(f'Recording started for {get_match_name()}')
def set_source_speed(source, speed):
    """Set a media source's playback speed; `speed` is scaled by 100
    into the "speed_percent" setting (1.0 -> 100)."""
    settings = obs.obs_source_get_settings(source)
    obs.obs_data_set_int(settings, "speed_percent", int(speed * 100))
    obs.obs_source_update(source, settings)
    obs.obs_data_release(settings)
def set_text_source_settings(settings, text_data):
    """Fill `settings` with the default font at size 80 and the given text."""
    with data_ar() as font_settings:
        obs.obs_data_set_string(font_settings, "face", DEFAULT_FONT)
        obs.obs_data_set_int(font_settings, "size", 80)
        obs.obs_data_set_obj(settings, "font", font_settings)
        obs.obs_data_set_string(settings, "text", text_data)
def setSourceTextColorByName(self, sourceName, color, outline=None):
    """Set the color of the named text source.

    `color` is an int in OBS color format.  `outline` is either None
    (no outline) or an int in OBS color format.  FreeType2 sources get
    color1/color2; GDI+ sources get color, opacity and the optional
    outline settings.
    """
    with getSourceByName(sourceName) as source:
        if source is None:
            return
        with createObsData() as settings:
            source_id = obs.obs_source_get_id(source)
            if source_id == 'text_ft2_source':
                obs.obs_data_set_int(settings, 'color1', color)
                obs.obs_data_set_int(settings, 'color2', color)
                # FreeType2 currently doesn't support setting outline
                # colors.  We *could* turn the outline on, but that's
                # probably not what whoever specified an outline wanted,
                # so we just won't.
            elif source_id == 'text_gdiplus':
                rgba = colorToRgba(color)
                obs.obs_data_set_int(settings, 'color', rgbaToColor(*rgba[:3]))
                obs.obs_data_set_int(settings, 'opacity',
                                     int(rgba[3] * 100 / 255))
                obs.obs_data_set_bool(settings, 'outline', outline is not None)
                if outline is not None:
                    outlineRgba = colorToRgba(outline)
                    obs.obs_data_set_int(settings, 'outline_color',
                                         rgbaToColor(*outlineRgba[:3]))
                    obs.obs_data_set_int(settings, 'outline_opacity',
                                         int(outlineRgba[3] * 100 / 255))
            obs.obs_source_update(source, settings)
def script_defaults(settings_data):
    """Register the default OSC port in the script's settings."""
    global OBS_OSC_PORT
    obs.obs_data_set_int(settings_data, "osc-port", OBS_OSC_PORT)
def set_sources():  # run only at loading and in thread t
    """Prime the two slide text sources for the upcoming cross-fade.

    Writes the new slide text into source 1 and the previous text into
    source 2, then sets "opacity"/"outline_opacity" directly on the
    sources so source 1 starts fully transparent and source 2 keeps the
    previous opacity.
    """
    global update_time
    global slideText
    global last_slideText
    global source_1_name
    global source_2_name
    global transparency1
    global transparency2
    update_time = time.time()
    # Carry the old opacity over to source 2; new text starts invisible.
    transparency2 = transparency1
    transparency1 = 0
    source1 = obs.obs_get_source_by_name(source_1_name)
    source2 = obs.obs_get_source_by_name(source_2_name)
    settings1 = obs.obs_data_create()
    settings2 = obs.obs_data_create()
    if source1 is not None:
        obs.obs_data_set_string(settings1, "text", slideText)
        if source2 is not None:
            obs.obs_data_set_string(settings2, "text", last_slideText)
            obs.obs_data_set_int(settings1, "opacity", transparency1)
            obs.obs_data_set_int(settings2, "opacity", transparency2)
            obs.obs_data_set_int(settings1, "outline_opacity", transparency1)
            obs.obs_data_set_int(settings2, "outline_opacity", transparency2)
        else:
            # Only the first source exists: show it at full opacity.
            obs.obs_data_set_int(settings1, "opacity", 100)
            obs.obs_data_set_int(settings1, "outline_opacity", 100)
    elif source2 is not None:
        # Only the second source exists: show it, hide the first.
        obs.obs_data_set_string(settings2, "text", last_slideText)
        obs.obs_data_set_int(settings1, "opacity", 0)
        obs.obs_data_set_int(settings2, "opacity", 100)
        obs.obs_data_set_int(settings1, "outline_opacity", 0)
        obs.obs_data_set_int(settings2, "outline_opacity", 100)
    # NOTE(review): these run unconditionally even when a lookup above
    # returned None — the obs_* calls are assumed null-safe; confirm.
    obs.obs_source_update(source1, settings1)
    obs.obs_source_update(source2, settings2)
    obs.obs_data_release(settings1)
    obs.obs_data_release(settings2)
    obs.obs_source_release(source1)
    obs.obs_source_release(source2)
def do_stuff(props, prop):
    """Show a "We'll Be Right Back" interstitial, then clean it up.

    Grabs a freeze frame of the current output, blurs/sepia-tones it,
    overlays it with a text banner and a jingle, mutes the audio
    sources, waits 4 seconds, then removes everything and unmutes.
    Blocks the calling thread for the whole interstitial.
    """
    # Bounce recording to force a fresh frame to disk for helpers to read
    # (NOTE(review): inferred from helpers.get_latest_frame below — confirm).
    obs.obs_frontend_recording_start()
    obs.obs_frontend_recording_stop()
    time.sleep(0.35)
    # 1. Get the freeze frame and save the processed image to disk.
    latest_frame = helpers.get_latest_frame()
    sepia_frame = helpers.sepia_filter(latest_frame)
    mixed_frame = cv2.addWeighted(latest_frame, 0.4, sepia_frame, 0.6, 0)
    blurred_frame = helpers.blur(mixed_frame, kernel_size=21)
    # Dunno why the default folder isn't loading?
    img_path = os.path.join("C:\\Users\\avikn\\Videos\\", "blurred.png")
    cv2.imwrite(img_path, blurred_frame)
    # 2. Add the freeze frame as a new full-sized source.
    current_scene_source = obs.obs_frontend_get_current_scene()
    obs.obs_frontend_set_current_preview_scene(current_scene_source)
    current_scene = obs.obs_scene_from_source(current_scene_source)
    freeze_frame = obs.obs_source_create("image_source", "FreezeFrame", None, None)
    image_settings = obs.obs_data_create()
    obs.obs_data_set_string(image_settings, "file", img_path)
    obs.obs_source_update(freeze_frame, image_settings)
    obs.obs_data_release(image_settings)
    freeze_item = obs.obs_scene_add(current_scene, freeze_frame)
    obs.obs_frontend_preview_program_trigger_transition()
    # 3. Add a text source for the "We'll be right back" banner.
    text = obs.obs_source_create("text_gdiplus", "BeRightBack", None, None)
    text_settings = obs.obs_data_create()
    font_settings = obs.obs_data_create()
    obs.obs_data_set_string(font_settings, "face", "Helvetica")
    obs.obs_data_set_int(font_settings, "size", 128)
    obs.obs_data_set_obj(text_settings, "font", font_settings)
    obs.obs_data_set_bool(text_settings, "outline", True)
    obs.obs_data_set_double(text_settings, "outline_size", 10)
    obs.obs_data_set_double(text_settings, "outline_color", 0)
    obs.obs_data_set_bool(text_settings, "extents", True)
    obs.obs_data_set_bool(text_settings, "extents_wrap", True)
    obs.obs_data_set_int(text_settings, "extents_cx", 300)
    obs.obs_data_set_int(text_settings, "extents_cy", 600)
    obs.obs_data_set_string(text_settings, "text", "We'll Be Right Back")
    obs.obs_source_update(text, text_settings)
    obs.obs_data_release(text_settings)
    text_item = obs.obs_scene_add(current_scene, text)
    # 4. Add a media source for the sound byte.
    song = obs.obs_source_create("ffmpeg_source", "Jingle", None, None)
    song_settings = obs.obs_data_create()
    obs.obs_data_set_string(song_settings, "local_file", "C:\\Users\\avikn\\Downloads\\brb_jingle.mp3")
    obs.obs_source_update(song, song_settings)
    obs.obs_data_release(song_settings)
    song_item = obs.obs_scene_add(current_scene, song)
    # 5. Trigger the transition so that it updates; mute other audio sources.
    obs.obs_frontend_preview_program_trigger_transition()
    desktop_audio = obs.obs_get_output_source(1)
    obs.obs_source_set_muted(desktop_audio, True)
    mic_audio = obs.obs_get_output_source(4)
    obs.obs_source_set_muted(mic_audio, True)
    # 6. Artificial delay, then cleanup.
    time.sleep(4)
    obs.obs_sceneitem_remove(freeze_item)
    obs.obs_source_release(freeze_frame)
    obs.obs_sceneitem_remove(text_item)
    obs.obs_source_release(text)
    obs.obs_sceneitem_remove(song_item)
    obs.obs_source_release(song)
    obs.obs_source_set_muted(desktop_audio, False)
    obs.obs_source_set_muted(mic_audio, False)
    obs.obs_frontend_preview_program_trigger_transition()
def stop_recording_action(calldata):
    """Signal handler for the match output stopping.

    On a successful stop, either spawns a child process (this same file
    in 'upload' mode) to upload the recording to YouTube, or cancels,
    depending on the module-level `action`; then resets `action`.
    """
    global action
    signal_output = obs.calldata_ptr(calldata, 'output')
    code = obs.calldata_int(calldata, 'code')
    # Ignore stop signals from outputs other than the match output.
    if signal_output != output:
        print(
            f'WARNING: Match stop recording signal called with non-match output'
        )
        print()
        return
    output_settings = obs.obs_output_get_settings(output)
    video_path = obs.obs_data_get_string(output_settings, 'path')
    obs.obs_data_release(output_settings)
    if code != 0:  # OBS_OUTPUT_SUCCESS == 0
        print(f'ERROR: Match recording not stopped successfully')
        print()
        return
    if action == 'upload':
        print(
            f'Uploading recording for {get_match_name()} at "{video_path}"'
        )
        if not obs.obs_data_get_string(
                settings, 'google_project_id') or not obs.obs_data_get_string(
                    settings, 'google_client_id') or not obs.obs_data_get_string(
                        settings, 'google_client_secret'):
            print(
                f'ERROR: Google API Project ID, Client ID, and Client Secret are all required'
            )
            print()
            return
        video_title = f'{obs.obs_data_get_string(settings, "event_name")} - {get_match_name()}'
        print(f' Video title: {video_title}')
        # Hand the upload job to the child via a temp JSON metadata file.
        metadata_fd, metadata_path = tempfile.mkstemp(suffix='.json', text=True)
        with os.fdopen(metadata_fd, 'w') as metadata_f:
            json.dump(
                {
                    'path': video_path,
                    'title': video_title,
                    'google_project_id':
                    obs.obs_data_get_string(settings, 'google_project_id'),
                    'google_client_id':
                    obs.obs_data_get_string(settings, 'google_client_id'),
                    'google_client_secret':
                    obs.obs_data_get_string(settings, 'google_client_secret'),
                    'description':
                    obs.obs_data_get_string(settings, 'youtube_description'),
                    'category_id':
                    obs.obs_data_get_string(settings, 'youtube_category_id'),
                    'privacy':
                    obs.obs_data_get_string(settings, 'youtube_privacy_status'),
                    'playlist':
                    obs.obs_data_get_string(settings, 'youtube_playlist'),
                    'toa_key':
                    obs.obs_data_get_string(settings, 'toa_key'),
                    'match':
                    f'{obs.obs_data_get_string(settings, "toa_event")}-{"Q" if obs.obs_data_get_string(settings, "match_type") == "qualification" else "E"}{obs.obs_data_get_int(settings, "match_code"):03}-1'
                }, metadata_f)
        print(f' Metadata Path: {metadata_path}')
        log_fd, log_path = tempfile.mkstemp(suffix='.txt')
        print(f' Log Path: {log_path}')
        # Re-run this script as a detached upload worker, logging to log_path.
        children.append((subprocess.Popen(
            [python_path, __file__, 'upload', metadata_path],
            stdin=subprocess.DEVNULL,
            stdout=log_fd,
            stderr=subprocess.STDOUT), log_path))
        os.close(log_fd)
        # Advance the stored counters for the next match.
        obs.obs_data_set_int(
            settings, 'match_number',
            obs.obs_data_get_int(settings, 'match_number') + 1)
        obs.obs_data_set_int(
            settings, 'match_code',
            obs.obs_data_get_int(settings, 'match_code') + 1)
        print()
    elif action == 'cancel':
        print(
            f'Cancelling upload for {get_match_name()} at "{video_path}"')
        print()
    action = 'none'