def table_dump():
    """Read the schedule table from `location` and populate the video table UI.

    Each row of the table file is "h,m,s,name,args". Rows that fail to parse
    are logged and skipped. If the table file does not exist yet, an empty one
    is created and the function retries once via recursion.
    """
    try:
        with open(location) as f:
            table = f.read().splitlines()
    except FileNotFoundError:
        # First run: create an empty table file, then retry once.
        with open(location, "w+"):
            pass
        table_dump()
        # BUGFIX: without this return, execution fell through and hit a
        # NameError on `table`, which is only bound in the try branch.
        return

    # Change rows to amount of "queued" videos
    video_list = []
    times = []
    videos = []
    flags = []
    for row in table:
        print(row, "row")
        try:
            # to be safe (and readable), always put the ',' separators
            h, m, s, name, args = row.split(',')
            print("Phase 1")
            h, m, s = map(int, (h, m, s))  # convert these to int
            print(name, "name")
            length = getLength(name)
            print(length, "length")
            times.append(":".join([str(h), str(m), str(s)]))
            videos.append(name)
            flags.append(args)
            print("Phase 2")
            # make sure video uses os-specific directory separator
            print("Not removed", name)
            name = name.replace("C:/", "")
            print("Removed", name)
            name_split = name.split("/")
            print("System: ", platform.system())
            if platform.system() == "Windows":
                print("Windows!")
                name_split.insert(0, "C:\\")
                print(name_split, "splitname")
                name = os.path.join(*name_split)
            else:
                print(name_split, "splitname")
                name = "/" + os.path.join(*name_split)
            print("Name:", name)
            # h == -1 marks a video that is played manually (not auto-enqueued);
            # the final flag records whether the video is scheduled.
            video_list.append(
                Video(h, m, s, name, ["--fullscreen"], length, h != -1))
            print(times, videos, flags, "total")
        except (IndexError, ValueError):
            # Malformed row: log it and move on to the next one.
            print(row, "row-except")
            print("excepted")
            continue
    print("Video list:", video_list)
    model = VideoTableModel(
        None, video_list, ["Video File Name", "Play Time", "Duration"])
    ui.table_videos.setModel(model)
def videos_list():
    """HTTP handler: POST enqueues a new video for download, GET lists all videos.

    POST expects a JSON:API-style body with `data.attributes.url` and
    `data.attributes.directory`; returns 400 if the URL is missing, 201 with
    the created video otherwise. GET returns every stored row serialized.
    """
    if request.method == 'POST':
        # We are only expecting the URL and Directory
        data = request.json
        vid_url = data["data"]["attributes"]["url"]
        vid_dir = data["data"]["attributes"]["directory"]
        if vid_url is None:  # `is None`, not `== None`
            return mod_response({'error': 'URL not specified'}, 400)
        if vid_dir is None:
            vid_dir = ""
        # Create the Video row & object
        rowId = create_row(vid_url, "", "Pending", "Placed in Queue", vid_dir, "")
        newVid = Video(rowId, vid_url, "", "Pending", "Placed in Queue", vid_dir, "")
        # Pass this video object to the Queue
        q.put(newVid)
        return mod_response({'data': newVid.toJson()}, 201)
    # GET: serialize every stored row.
    data = {
        'data': [
            Video(row[0], row[1], row[2], row[3], row[4], row[5],
                  row[6]).toJson() for row in get_all_rows()
        ]
    }
    return mod_response(data, 200)
def validation_video_ids(self):
    """Collect indices, display names and processing types for every
    validation video.

    Returns ids that belong to validation videos, regardless of whether the
    labels are valid or not. Populates:
      - self.unique_val_videos: sorted list of unique validation video ids
      - self.val_video_indices: per-video lists of sample indices
      - self.val_video_real_names: original video name + position suffix
      - self.val_video_types: types to process, per video

    Raises NameError if a video file is found neither as .mp4 nor .avi.
    """
    print('val ids...')
    # Indices of all samples flagged as belonging to the validation split.
    val_indices = np.nonzero(self.val_ids == 1)[0]
    val_video_id = []
    for i in val_indices:
        val_video_id.append(self.video_id[i])
    self.unique_val_videos = sorted(list(
        set(val_video_id)))  # val should be 145 videos
    assert len(self.unique_val_videos) == 145
    self.val_video_indices = []
    self.val_video_real_names = []
    self.val_video_types = []
    for vid in self.unique_val_videos:
        # All sample indices that belong to this validation video.
        tmp = [
            i for i in range(len(self.video_id))
            if self.video_id[i] == vid and self.val_ids[i] == 1
        ]
        self.val_video_indices.append(tmp)
        # Append the "real name" (original video) plus the position suffix.
        position = get_position(vid)
        vid_path = os.path.join(self.video_dir, vid + '.mp4')
        vid_path2 = os.path.join(self.video_dir, vid + '.avi')
        # Videos may be stored as .mp4 or .avi; try both before failing.
        if os.path.isfile(vid_path):
            real_name = Video(vid_path).meta['original_video']
        elif os.path.isfile(vid_path2):
            real_name = Video(vid_path2).meta['original_video']
        else:
            print(vid_path)
            print(vid_path2)
            raise NameError('video not found')
        self.val_video_real_names.append(real_name + position)
        # check what types should be processed for this video
        self.val_video_types.append(
            self.get_types_from_name_split(vid, 'val'))
async def play(self, ctx, *, url):
    """Plays audio hosted at <url> (or performs a search for <url> and plays
    the first result)."""
    client = ctx.guild.voice_client
    state = self.get_state(ctx.guild)  # get the guild's state
    if client and client.channel:
        # Already connected to a voice channel: just enqueue the video.
        try:
            video = Video(url, ctx.author)
        except youtube_dl.DownloadError as e:
            # logging.warn() is deprecated; warning() is the supported name.
            logging.warning(f"Error downloading video: {e}")
            await ctx.send(
                "There was an error downloading your video, sorry.")
            return
        state.playlist.append(video)
        message = await ctx.send("Added to queue.", embed=video.get_embed())
        await self._add_reaction_controls(message)
    elif ctx.author.voice is not None and ctx.author.voice.channel is not None:
        # Not connected yet: join the caller's channel and start playing.
        channel = ctx.author.voice.channel
        try:
            video = Video(url, ctx.author)
        except youtube_dl.DownloadError:
            await ctx.send(
                "There was an error downloading your video, sorry.")
            return
        client = await channel.connect()
        self._play_song(client, state, video)
        message = await ctx.send("", embed=video.get_embed())
        await self._add_reaction_controls(message)
        logging.info(f"Now playing '{video.title}'")
    else:
        raise commands.CommandError(
            "You need to be in a voice channel to do that.")
def testCheckVideoInput(self):
    """checkVideoInput accepts the fixture video and rejects invalid paths."""
    # The video configured in setUp must pass validation.
    self.hyperlapse.checkVideoInput()
    # Both an empty path and a CSV file are invalid video inputs.
    for bad_path in ('', '/home/victorhugomoura/Documents/example.csv'):
        self.hyperlapse.video = Video(bad_path)
        self.assertRaises(InputError, self.hyperlapse.checkVideoInput)
def update_list(self, videos):
    """Add newly discovered videos to self.videos and refresh the display.

    `videos` is either a single video info dict or a playlist result with an
    'entries' list. Entries whose title is already tracked are skipped.
    The previous version duplicated the append logic in both branches.
    """

    def add(info):
        # Skip entries whose title is already in the tracked list.
        if info['title'] in (v.original_title for v in self.videos):
            return
        record = Video(info['url'], info['title'], info['duration'],
                       info['thumbnail'])
        record.is_checked = True
        record.filesize = self.get_filesize(
            info['formats'], 'best')  # TODO now its only best quality
        self.videos.append(record)

    if 'entries' in videos:
        # Playlist result: each entry describes one video.
        for video in videos['entries']:
            add(video)
    else:
        # Single-video result.
        add(videos)
    self.show_list()
def train(model, output_dir, train_feed, test_feed, lr_start=0.01, lr_stop=0.00001, lr_gamma=0.75, n_epochs=150, gan_margin=0.35): n_hidden = model.latent_encoder.n_out # For plotting original_x = np.array(test_feed.batches().next()[0]) samples_z = np.random.normal(size=(len(original_x), n_hidden)) samples_z = (samples_z).astype(dp.float_) recon_video = Video(os.path.join(output_dir, 'convergence_recon.mp4')) sample_video = Video(os.path.join(output_dir, 'convergence_samples.mp4')) original_x_ = original_x original_x_ = img_inverse_transform(original_x) sp.misc.imsave(os.path.join(output_dir, 'examples.png'), dp.misc.img_tile(original_x_)) # Train network learn_rule = dp.RMSProp() annealer = dp.GammaAnnealer(lr_start, lr_stop, n_epochs, gamma=lr_gamma) trainer = aegan.GradientDescent(model, train_feed, learn_rule, margin=gan_margin) try: for e in range(n_epochs): model.phase = 'train' model.setup(*train_feed.shapes) learn_rule.learn_rate = annealer.value(e) / train_feed.batch_size trainer.train_epoch() model.phase = 'test' original_z = model.encode(original_x) recon_x = model.decode(original_z) samples_x = model.decode(samples_z) recon_x = img_inverse_transform(recon_x) samples_x = img_inverse_transform(samples_x) recon_video.append(dp.misc.img_tile(recon_x)) sample_video.append(dp.misc.img_tile(samples_x)) except KeyboardInterrupt: pass model.phase = 'test' n_examples = 100 test_feed.reset() original_x = np.array(test_feed.batches().next()[0])[:n_examples] samples_z = np.random.normal(size=(n_examples, n_hidden)) output.samples(model, samples_z, output_dir, img_inverse_transform) output.reconstructions(model, original_x, output_dir, img_inverse_transform) original_z = model.encode(original_x) output.walk(model, original_z, output_dir, img_inverse_transform) return model
def process_screen(self):
    """Method for process current screen. Form handling, initialize
    necessary classes, etc.

    The previous version duplicated the path-input logic and the
    video/mp3 download logic; both are factored into local helpers.
    """

    def handle_path_input(valid_state, invalid_state, empty_goes_home):
        # Read a download directory from the user and validate it.
        path = input()
        if empty_goes_home and path == '':
            self.next = 'main_menu'
            return
        if self.path.is_valid(path):
            self.path.write(path)
            self.next = valid_state
        else:
            self.next = invalid_state

    def handle_download(valid_state, invalid_state, as_mp3):
        # Read a link; empty input returns the user to the main menu.
        link = input()
        if link == '':
            self.next = 'main_menu'
            return
        video = Video(link)
        if video.link_is_correct:
            if as_mp3:
                video.download(self.path.read(), as_mp3=True)
            else:
                video.download(self.path.read())
            self.next = valid_state
        else:
            self.next = invalid_state

    # If it is simple menu-type screen, just listen keyboard to switch screen
    if self.name in self.menu_map.keys():
        self.set_next_screen()
    # If download directory is not given, user have to do it
    elif self.name in ('force_download_path', 'force_download_path_invalid'):
        handle_path_input('force_download_path_valid',
                          'force_download_path_invalid', False)
    # If user want to change download directory, use this code
    elif self.name in ('set_download_path_input', 'set_download_path_invalid'):
        handle_path_input('set_download_path_valid',
                          'set_download_path_invalid', True)
    elif self.name == 'set_download_path_valid':
        time.sleep(2)
        self.next = 'main_menu'
    # If user want to download file from yt, use this code:
    elif self.name in ('video_menu', 'video_invalid'):
        handle_download('video_valid', 'video_invalid', False)
    elif self.name in ('mp3_menu', 'mp3_invalid'):
        handle_download('mp3_valid', 'mp3_invalid', True)
    elif self.name == 'channel_menu':
        # TODO: Create channel menu for full channel download
        self.next = 'main_menu'
async def playall(self, ctx, *, url):
    """Queues list hosted at <url> (or performs a search for <url> and plays the queues the whole list).\nCurrently only supports Youtube content.\nPlease visit https://gitlab.com/indecent/pyqbot/issues and open a new issue if you would like more sources added."""
    client = ctx.guild.voice_client
    state = self.get_state(ctx.guild)  # get the guild's state
    videos = Videos(url, ctx.author)
    if videos.stream_urls is not None:
        # Per-guild cap on how many songs this command may queue at once.
        db_client = Database()
        play_limit = db_client.get_queue_limit(ctx.guild.id)
        if play_limit == 0:
            # 0 means "no limit configured": fall back to the default of 15
            # and tell the user how to change it.
            play_limit = 15
            message = await ctx.send("You currently have no limit set for this command!\nThe default limit for this command to queue songs is 15.\nContact an administrator on your server to run the `setplaylimit` command to set a new limit!")
        for stream_url in videos.stream_urls:
            # NOTE(review): .index() is O(n) per iteration and returns the
            # first occurrence for duplicate URLs; enumerate() would be safer.
            if videos.stream_urls.index(stream_url) < play_limit:
                if client and client.channel:
                    # Already connected: append to the existing playlist.
                    try:
                        video = Video(stream_url, ctx.author, len(state.playlist)+1)
                    except youtube_dl.DownloadError as e:
                        logging.warn(f"Error downloading video: {e}")
                        await ctx.send(
                            f"There was an error downloading your video, {stream_url}, sorry.")
                    # NOTE(review): if the download above failed, `video` still
                    # holds a previous iteration's value (or is unbound on the
                    # first iteration) -- confirm this is intended.
                    try:
                        state.playlist.append(video)
                    except Exception:
                        # A failed append should not consume the play limit.
                        play_limit += 1
                else:
                    if ctx.author.voice != None and ctx.author.voice.channel != None:
                        # First song: join the caller's channel and start playback.
                        channel = ctx.author.voice.channel
                        try:
                            video = Video(stream_url, ctx.author, len(state.playlist))
                        except youtube_dl.DownloadError as e:
                            await ctx.send(
                                f"There was an error downloading your video, {stream_url}, sorry.")
                        try:
                            if video:
                                client = await channel.connect()
                                self._play_song(client, state, video)
                                message = await ctx.send("", embed=video.get_embed())
                                await self._add_reaction_controls(message)
                                logging.info(f"Now playing '{video.title}'")
                        except Exception as e:
                            # Playback setup failed: log and free up one slot.
                            pprint(e)
                            play_limit += 1
                    else:
                        message = await ctx.send("You need to be in a voice channel to do that.")
                        raise commands.CommandError("You need to be in a voice channel to do that.")
    else:
        # No stream URLs could be resolved for this query.
        message = await ctx.send("I couldn't find \nwhat you were looking for. You might need to use the `play` command for that.")
def setUp(self):
    """Build a small fixture: two videos, one cache server, two endpoints."""
    cache = CacheServer(0, 40)
    self.videos = [Video(0, 50), Video(1, 30)]
    self.cache_servers = [cache]
    self.endpoints = [
        # Endpoint with data-center latencies and a connection to the cache.
        Endpoint(0, {0: 100, 1: 200}, [(10, cache)]),
        # Endpoint with data-center latencies only.
        Endpoint(0, {0: 100, 1: 50}),
    ]
def get_videos_from(result: UrlResult) -> Iterable[Video]:
    """Yield Video objects for a URL result.

    A single-video result yields one Video; a playlist yields one per item,
    skipping items that fail to load. A missing/private playlist is reported
    and yields nothing.
    """
    if result.type == "single_video":
        yield Video(result.url)
    elif result.type == "playlist":
        try:
            for video in pafy.get_playlist(result.url)['items']:
                try:
                    yield Video(video['pafy'])
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; Exception keeps the skip-bad-item intent.
                except Exception:
                    continue
        except ValueError:
            print("Playlist is missing/private.")
def testCheckParameters(self):
    """checkParameters accepts the fixture and rejects each invalid field."""
    bad_source = '/home/victorhugomoura/Documents/example.csv'
    bad_output = '/home/victorhugomoura/Documents/out/example.csv'
    # The default fixture is valid.
    self.stabilizer.checkParameters()
    # A CSV original video is rejected.
    self.stabilizer.originalVideo = Video(bad_source)
    self.assertRaises(InputError, self.stabilizer.checkParameters)
    self.setUp()  # restore a valid stabilizer
    # A CSV accelerated video is rejected.
    self.stabilizer.acceleratedVideo = Video(bad_output)
    self.assertRaises(InputError, self.stabilizer.checkParameters)
    self.setUp()  # restore a valid stabilizer
    # A string velocity is rejected.
    self.stabilizer.velocity = '1'
    self.assertRaises(InputError, self.stabilizer.checkParameters)
def youtube_search(searchTerm):
    """Search YouTube for `searchTerm` and return up to 3 seed Video objects.

    Non-video results are reported and skipped.
    """
    # Call the search.list method to retrieve results matching the specified
    # query term.
    search_response = youtube.search().list(
        q=searchTerm,
        regionCode='US',
        relevanceLanguage='en',
        safeSearch='none',
        fields='items(id, snippet/title)',
        type='video',
        part='id,snippet',
        maxResults=3).execute()
    seedVideos = []
    # Add each result to the appropriate list, and then display the lists of
    # matching videos, channels, and playlists.
    for search_result in search_response.get('items', []):
        if search_result['id']['kind'] == 'youtube#video':
            # `video_id` instead of `id`, which shadowed the builtin.
            video_id = search_result['id']['videoId']
            title = search_result['snippet']['title']
            seedVideos.append(Video(video_id, title, 0))
        else:
            print('Found something that was not a video')
    return seedVideos
def main():
    """Entry point: bring up the video, input and GUI subsystems and run the
    graphics main loop. Returns 1 on initialization failure, 0 otherwise."""
    gfx = Video()
    inp = Input()
    gui = GUI()
    # Initialize every subsystem; abort with a nonzero exit code on failure.
    try:
        for subsystem in (gfx, inp, gui):
            subsystem.initialize()
    except InitializationError as error:
        print(error)
        return 1
    # Build the interface, then hand control to the graphics main loop.
    gui.setupInterface()
    gfx.enterMainLoop()
    # We will never actually get here.
    gfx.shutdown()
    return 0
def __init__(self, verbose, scale):
    """Initialize CHIP-8-style CPU state: registers, timers, stack, program
    counter, memory, video output and key states."""
    # 16 general purpose 8-bit registers
    self._reg = array.array('B', [0] * 16)
    # 16-bit index register
    self._I = array.array('H', [0])
    # Timers: index 0 = delay, index 1 = sound
    self._timer = array.array('B', [0, 0])
    # 16-level call stack
    self._stack = array.array('H', [0] * 16)
    # Program counter starts at 0x0200
    self._PC = array.array('H', [0x0200])
    # Memory
    self.memory = Memory()
    # Video output
    self.video = Video(verbose, scale)
    # One state flag per hex key (16 keys)
    self._keystate = array.array('B', [0] * 16)
    # private properties: instruction rate and frame clock
    self.__ips = 60
    self.clock = pygame.time.Clock()
def setUp(self):
    """Create a SemanticHyperlapse over the sample video, using the face
    extractor at velocity 10."""
    sample = Video('/home/victorhugomoura/Documents/example.mp4')
    self.hyperlapse = SemanticHyperlapse(sample, 'face', 10)
def get_videos(channel_id, date):
    """Return up to 50 finished (non-live, non-upcoming) videos from
    `channel_id` published before `date`, newest first."""
    global __yt
    # Lower bound of the search window, formatted as an RFC-3339 timestamp.
    earliest = dt.datetime.strptime(
        '2000-01-01T00:00:00.0',
        '%Y-%m-%dT%H:%M:%S.%f').isoformat('T') + 'Z'
    # Call the search.list method to retrieve results for this channel.
    search_response = __yt.search().list(
        channelId=channel_id,
        part="id,snippet",
        order="date",
        maxResults=50,
        publishedBefore=date,
        publishedAfter=earliest
    ).execute()
    videos = []
    for item in search_response.get("items", []):
        # Only keep actual videos.
        if item["id"]["kind"] != "youtube#video":
            continue
        snippet = item["snippet"]
        # Skip broadcasts that have not finished yet.
        if snippet["liveBroadcastContent"] in ["upcoming", "live"]:
            continue
        published = dt.datetime.strptime(snippet["publishedAt"][:-1],
                                         '%Y-%m-%dT%H:%M:%S.%f')
        videos.append(Video(item["id"]["videoId"], snippet["title"],
                            snippet["description"], published))
    return videos
def __init__(self, parent=None, max_buf_size=500):
    """Widget wrapping a buffered Video; builds the UI and wires events."""
    super(VideoWidget, self).__init__(parent)
    self.max_buf_size = max_buf_size
    self.video = Video(max_buf_size=max_buf_size)
    self.init_ui()
    # Seek when the user releases the position slider.
    self.slider.sliderReleased.connect(self.on_slider_released)
    self.installEventFilter(self)
def __iter__(self):
    """Yield one clip dict per sample, seeking at evenly spaced timestamps.

    Each yielded dict has keys: 'path', 'video' (stacked frame tensor),
    'target', 'start' (the last seek time used) and 'end' (pts of the last
    decoded frame).
    """
    for i in range(len(self.samples)):
        # get random sample
        path, target = self.samples[i]
        # get video object
        vid = Video(path, debug=False)
        video_frames = []  # video frame buffer
        # seek and return frames
        # Latest start time that still leaves room for clip_len frames.
        max_seek = vid.metadata[vid.current_stream]['duration'] - (
            self.clip_len / vid.metadata[vid.current_stream]['fps'] + self.alpha)
        step = max(max_seek // self.num_steps, 1)  # NOTE(review): unused
        # Evenly spaced timestamps across [0, max_seek].
        tss = [
            i.item() for i in list(
                torch.linspace(0, max_seek, steps=self.num_steps))
        ]
        # NOTE(review): video_frames is NOT cleared between seek points, so
        # the while-loop only decodes frames for the first start; later seeks
        # find the buffer already full -- confirm this is intended.
        for start in tss:
            vid.seek(start, stream="video", any_frame=True)
            while len(video_frames) < self.clip_len:
                frame, current_pts = vid.next("video")
                video_frames.append(self.frame_transform(frame))
        # stack it into a tensor
        video = torch.stack(video_frames, 0)
        if self.video_transform:
            video = self.video_transform(video)
        output = {
            'path': path,
            'video': video,
            'target': target,
            'start': start,
            'end': current_pts
        }
        yield output
def __init__(self):
    """Set up the main game object: display surface, state machine, A/V,
    timing, input and debug helpers; starts background map loading."""
    self.screen = pygame.display.get_surface()
    self.gamestate = StateMachine()
    self.data = None
    # Audio gets a back-reference to this object; Video does not take one.
    self.video = Video()
    self.audio = Audio(self)
    self.running = False
    self.all_maps_loaded = False
    self.force_bg_music = False
    # Frame timing state.
    self.clock = pygame.time.Clock()
    self.playtime = 0.0
    self.dt = 0.0
    # Cooldown timers for key handling and state switching.
    self.key_timer = 0.0
    self.state_timer = 0.0
    self.debugfont = pygame.font.SysFont(DEBUGFONT, DEBUGFONTSIZE)
    self.key_input = None
    self.mouse_input = None
    # Debug overlay flags.
    self.show_debug = False
    self.debug_mode = False
    self.fps = FPS
    # Load maps in a background thread so startup stays responsive.
    threading.Thread(target=self.load_all_maps).start()
def _feature_extractor_component(self):
    """Build the sidebar UI for choosing a feature extractor and ensure the
    current video has features extracted with the chosen strategy."""
    st.sidebar.title("Pick a feature extractor")
    # Offer every registered extraction strategy in a selection box and
    # remember the user's choice.
    strategies = ExtractorFactory.values_list()
    self.selected_extractor = st.sidebar.selectbox(
        'Select an extraction strategy', strategies)
    # Instantiate the chosen extractor via the factory.
    self.feature_extractor = ExtractorFactory.get(self.selected_extractor)()
    # Bind the video to its path and the selected extractor name.
    self.video = Video(self.video_path, self.selected_extractor)
    # Extract on demand if no features exist for this extractor yet.
    if self.video.features.has_features is False:
        with st.spinner("Extracting..."):
            self.feature_extractor.extract(self.video)
def test_codec(self):
    """The sample file must report the avc1 codec."""
    sample = self._test_file('test_video_read.mp4')
    with Video(sample) as video:
        self.assertEqual(video.codec, 'avc1')
def __iter__(self):
    """Yield `epoch_size` random clips.

    Each clip starts at a uniformly random seek position and contains
    `clip_len` transformed frames; the yielded dict has keys 'path', 'video',
    'target', 'start' and 'end' (pts of the last decoded frame).
    """
    for i in range(self.epoch_size):
        # get random sample
        path, target = random.choice(self.samples)
        # get video object
        vid = Video(path, debug=False)
        video_frames = []  # video frame buffer
        # Latest start time that still leaves room for a full clip.
        max_seek = vid.metadata[vid.current_stream]['duration'] - (
            self.clip_len / vid.metadata[vid.current_stream]['fps'] + self.alpha)
        start = random.uniform(0., max_seek)
        # seek and return frames
        vid.seek(start, stream="video", any_frame=self.from_keyframes)
        while len(video_frames) < self.clip_len:
            frame, current_pts = vid.next("video")
            video_frames.append(self.frame_transform(frame))
        # stack it into a tensor
        video = torch.stack(video_frames, 0)
        if self.video_transform:
            video = self.video_transform(video)
        output = {
            'path': path,
            'video': video,
            'target': target,
            'start': start,
            'end': current_pts}
        yield output
def download(self, download_path: str = "downloads"):
    """Download the hard-coded list of cda.pl episodes into `download_path`.

    Creates the target directory if it does not exist (resolves the old TODO).
    """
    import os  # local import: ensure the target folder exists
    os.makedirs(download_path, exist_ok=True)
    videos = [
        "https://www.cda.pl/video/2486267d4",
        "https://www.cda.pl/video/248630943",
        "https://www.cda.pl/video/250105452",
        "https://www.cda.pl/video/2518128a9",
        "https://www.cda.pl/video/2531640cb",
        "https://www.cda.pl/video/2546847e8",
        "https://www.cda.pl/video/256809044",
        "https://www.cda.pl/video/2583666bd",
        "https://www.cda.pl/video/25999076f",
        "https://www.cda.pl/video/261542952",
        "https://www.cda.pl/video/2652516eb",
        "https://www.cda.pl/video/2671470a4",
        "https://www.cda.pl/video/3424375c3",
        "https://www.cda.pl/video/3443971c3",
        "https://www.cda.pl/video/3463282e8",
        "https://www.cda.pl/video/3485605e6",
        "https://www.cda.pl/video/351312746",
        "https://www.cda.pl/video/35377301f",
        "https://www.cda.pl/video/35612772a",
        "https://www.cda.pl/video/358452193",
        "https://www.cda.pl/video/360697930",
        "https://www.cda.pl/video/362523795"
    ]
    for url in videos:
        video = Video(url)
        print(url)
        video.download(download_path)
def displayVideo(self, file_path, width=None, height=None):
    """Play the video at `file_path` on this display.

    Width/height default to the display's own dimensions when not given.
    """
    if width is None:  # `is None`, not `== None`
        width = self.width
    if height is None:
        height = self.height
    video = Video(self.disp, width, height, file_path)
    video.play()
def read_data(filename):
    """Parse '../inputs/<filename>.in' into (cache_servers, videos, endpoints).

    The header line supplies the endpoint, request and cache counts plus the
    cache capacity; subsequent lines list video sizes, endpoint latencies and
    request descriptions.
    """
    with open('../inputs/%s.in' % filename) as f:
        header = line_to_int_list(f)
        # header[1:5] = number of endpoints, requests, caches, cache capacity.
        num_endpoint, num_request, num_cache, cache_size = header[1:5]
        cache_servers = [CacheServer(i, cache_size) for i in range(num_cache)]
        video_sizes = line_to_int_list(f)
        videos = [Video(i, size) for i, size in enumerate(video_sizes)]
        end_points = list()
        for i in range(num_endpoint):
            l = line_to_int_list(f)
            endpoint = Endpoint(i)
            # l[1] = number of connected caches for this endpoint.
            for j in range(l[1]):
                ll = line_to_int_list(f)
                # ll = [cache id, latency to that cache].
                endpoint.latency.append((ll[1], cache_servers[ll[0]]))
            end_points.append(endpoint)
        for i in range(num_request):
            lr = line_to_int_list(f)
            # lr = [video id, endpoint id, request count].
            end_points[lr[1]].requests.append((lr[2], videos[lr[0]]))
        return (cache_servers, videos, end_points)
def __init__(self, verbose=False):
    """Initialize the controller: state machine values, hardware/service
    sub-systems, and the default drive settings."""
    self.verbose = verbose
    self.status_counter = 0
    self.state = self.STATE_WAITING
    self.currenttime = 0
    self.flex_fish_limit = self.FLEX_FISH_LIMIT
    # Sub-systems; each receives a back-reference to this controller.
    self.player = Player(self, verbose)
    self.adc_sensors = AdcSensors(self, verbose)
    self.motors = Motors(self, verbose)
    self.web_connection = WebConnection(self, verbose)
    self.gps_tracker = GpsTracker(self, verbose)
    self.video = Video(self, verbose)
    # Initial values for settings
    # Speed: (0-1). 1 = Full speed ahead
    # Turn = -1 - +1 = +1 Only left motor on (turn to right)
    #                   0 Both motors same speed
    #                  -1 Only right motor on (turn to left)
    self.speed = 0.0
    self.turn = 0.0
    # speed style examples:
    # - Constant speed = (low_speed_percent = 100)
    # - Stop and go jigging with 6 sec motor on and 4 sec stop:
    #   low_speed_percent = 0, speed_change_cycle = 10,
    #   speed_motors_full_percent = 60
    # - Trolling with 10 sec half speed and 5 sec full speed:
    #   low_speed_percent = 50, speed_change_cycle = 15,
    #   speed_motors_full_percent = 66.66
    self.speed_change_cycle = 0
    self.speed_motors_full_percent = 100
    self.low_speed_percent = 0
    # Play music or not
    self.play_music = False
def get_info_list(path):
    """Walk `path`/workspace and collect picture and video info rows.

    Returns (pic_info, vid_info); each is a 2-D list whose first row is the
    corresponding sheet title. Files are processed in creation-time order;
    files with an unrecognized extension are skipped.
    """
    workspace_path = path + "/workspace"
    pic_info = []
    pic_info.append(PIC_SHEET_TITLE)
    vid_info = []
    vid_info.append(VID_SHEET_TITLE)
    target_files = []
    for root, dirs, files in os.walk(workspace_path):
        for file in files:
            tmp_file = root + "/" + file
            target_files.append(tmp_file)
    for file in sort_by_creation_time(target_files):
        # BUGFIX: `_, suffix = file.split(".")` raised ValueError for names
        # with zero or multiple dots; rsplit takes the final extension and
        # extension-less names simply fail both format checks below.
        file_suffix = file.rsplit(".", 1)[-1]
        if file_suffix in PIC_FORMAT:
            pic = Picture(file)
            pic_item = pic.get_info()
            pic_info.append(pic_item)  # row of the 2-D result list
        elif file_suffix in VID_FORMAT:
            vid = Video(file)
            vid_item = vid.get_info()
            vid_info.append(vid_item)
        else:
            continue
    return pic_info, vid_info
def generate_data(data_dir, detector, transform, device, pose_model, out):
    '''
    Generate series of poses from a folder of videos, normalize them, and
    save the resulting array to `out` via np.save.
    '''
    data = []
    mask = []
    for filename in tqdm(os.listdir(data_dir)):
        video = Video(os.path.join(data_dir, filename), detector, transform,
                      device, pose_model)
        video.extract_poses()
        # Window the pose sequence into fixed-size series.
        # NOTE(review): the meaning of the constants 10 and 7 comes from
        # PoseSeriesGenerator -- confirm before changing them.
        generator = PoseSeriesGenerator(video, 10, 7)
        series, mask_ = generator.generate()
        data.extend(series)
        mask.extend(mask_)
    data = np.asarray(data)
    mask = np.asarray(mask)
    # get the head by taking the average of five key points on the head
    # (nose, left_eye, right_eye, left_ear, right_ear)
    data[:, :, 4][mask] = np.mean(data[:, :, :5][mask], axis=1)
    data = data[:, :, 4:]
    # min-max normalization
    # BUGFIX(idiom): renamed `min`/`max` locals, which shadowed the builtins.
    coord_min = np.min(data[:, :, :, :2][mask], axis=1, keepdims=True)
    coord_max = np.max(data[:, :, :, :2][mask], axis=1, keepdims=True)
    data[:, :, :, :2][mask] = (
        (data[:, :, :, :2][mask] - coord_min) / (coord_max - coord_min))
    # get the origin by taking the average of four key points on the body
    # (left_shoulder, right_shoulder, left_hip, right_hip)
    origin = (np.sum(data[:, :, 1:3, :2][mask], axis=1, keepdims=True)
              + np.sum(data[:, :, 7:9, :2][mask], axis=1, keepdims=True)) / 4
    # shift the origin
    data[:, :, :, :2][mask] = data[:, :, :, :2][mask] - origin
    # save into file
    np.save(out, data)
def test_width(self):
    """The sample file must report a width of 1092 pixels."""
    sample = self._test_file('test_video_read.mp4')
    with Video(sample) as video:
        self.assertEqual(video.width, 1092)