def post_handler():
    """Parse a colon-separated request body and enqueue the matching request object.

    Returns an HTTP-style (body, status) tuple: 500 when the courier queue is
    unavailable, 400 on any malformed/unknown request, 202 once queued.
    """
    global _courier
    if not _courier:
        return ('', 500)

    # Decode the raw POST body and split it into stripped fields.
    try:
        raw = request.get_data().decode('utf8')
        fields = tuple(part.strip() for part in raw.split(':'))
    except Exception as err:
        print(err)
        return ('', 400)

    kind = fields[0]
    if kind == 'video' and len(fields) >= 2:
        item = Video(fields[1])
        # A video with no resolvable duration is treated as a bad request.
        if not item.duration:
            return ('', 400)
    elif kind == 'sound' and len(fields) >= 3 and util.search_sounds(
            audiopath, fields[1], fields[2]):
        item = Sound(fields[1], fields[2])
    elif kind == 'skip':
        item = Skip()
    elif kind == 'checkin':
        item = CheckIn()
    elif kind == 'checkout':
        item = CheckOut()
    else:
        # Unknown verb or too few fields for the verb: malformed request.
        return ('', 400)

    _courier.put(item)
    return ('', 202)
def __init__(self, session, sref, playlist, playlistName, playlistCB,
             subtitles=None, autoPlay=True, showProtocol=False,
             onStartShow=False, repeat=False):
    """Set up the ArchivCZSK movie player screen.

    Args:
        session: Enigma2 session used for dialogs and service queries.
        sref: service reference of the item to play.
        playlist, playlistName, playlistCB: playlist data, display name and
            callback forwarded to InfoBarPlaylist.
        subtitles: optional path to a subtitle file.
        autoPlay/showProtocol/onStartShow/repeat: playlist behavior flags.

    NOTE: the mixin __init__ calls below are order-dependent (Enigma2 screen
    machinery); do not reorder them.
    """
    BaseArchivCZSKScreen.__init__(self, session)
    self.onPlayService = []
    self.settings = config.plugins.archivCZSK.videoPlayer
    self.sref = sref

    # set default/non-default skin according to SD/HD mode
    if self.settings.useDefaultSkin.getValue():
        self.setSkinName("MoviePlayer")
    else:
        HD = getDesktop(0).size().width() == 1280
        if HD:
            self.setSkin("ArchivCZSKMoviePlayer_HD")
        else:
            self.setSkinName("MoviePlayer")

    # init custom infobar (added info about download speed, buffer level..)
    ArchivCZSKMoviePlayerInfobar.__init__(self)

    # custom actions for MP
    self["actions"] = HelpableActionMap(self, "ArchivCZSKMoviePlayerActions",
        {
            "leavePlayer": (self.leavePlayer, _("leave player?")),
            "toggleShow": (self.toggleShow, _("show/hide infobar")),
            "audioSelection": (self.audioSelection, _("show audio selection menu")),
            "refreshSubs": (self.refreshSubs, _("refreshing subtitles position")),
            "subsDelayInc": (self.subsDelayInc, _("increasing subtitles delay")),
            "subsDelayDec": (self.subsDelayDec, _("decreasing subtitles delay"))
        }, -3)
    InfoBarBase.__init__(self, steal_current_service=True)

    # init of all inherited screens
    # (HelpableScreen was listed twice in the original tuple, running its
    # __init__ twice; the duplicate has been removed.)
    for x in HelpableScreen, InfoBarShowHide, \
        InfoBarSeek, InfoBarAudioSelection, InfoBarNotifications, \
        InfoBarServiceNotifications, InfoBarPVRState, \
        InfoBarAspectChange, InfoBarServiceErrorPopupSupport:
        x.__init__(self)

    # init subtitles
    initSubsSettings()
    SubsSupport.__init__(self, subsPath=subtitles,
                         defaultPath=config.plugins.archivCZSK.subtitlesPath.getValue(),
                         forceDefaultPath=True, searchSupport=True)

    # playlist support
    InfoBarPlaylist.__init__(self, playlist, playlistCB, playlistName,
                             autoPlay=autoPlay, onStartShow=onStartShow,
                             repeat=repeat, showProtocol=showProtocol)

    # to get real start of service, and for queries for video length/position
    self.video = Video(session)

    # bind some video events to handler methods
    self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
        {
            iPlayableService.evStart: self.__serviceStarted,
            iPlayableService.evUpdatedEventInfo: self.__evUpdatedEventInfo,
            iPlayableService.evUser + 10: self.__evAudioDecodeError,
            iPlayableService.evUser + 11: self.__evVideoDecodeError,
            iPlayableService.evUser + 12: self.__evPluginError,
        })
    self.statusDialog = session.instantiateDialog(StatusScreen)
    # ensure the status dialog is torn down with this screen
    self.onClose.append(self.statusDialog.doClose)
    # a path containing '://' indicates a network stream rather than a local file
    self.isStream = self.sref.getPath().find('://') != -1
    self.returning = False
def main():
    """Parse CLI arguments, build the model and dataset, then train or evaluate."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_dir', help='Directory containing video directories.')
    parser.add_argument('--interval', default=2, type=int, help='Interval between frames.')
    parser.add_argument('--middle_interval', default=1, type=int,
                        help='Prediction target in between.')
    parser.add_argument('--num_iters', default=100000, type=int,
                        help="Number of training iterations.")
    parser.add_argument('--batch_size', default=64, type=int, help="Batch size.")
    parser.add_argument('--model_path', default='video_upsampling.pt',
                        help="Path to video upsampling model.")
    parser.add_argument('--mode', help="Train or eval.")
    parser.add_argument('--test_video', help="Test video output path.")
    parser.add_argument('--preload_imgs', dest='preload_imgs', action='store_true',
                        help="Whether to load images into memory ahead of time.")
    parser.add_argument('--num_workers', default=0, type=int,
                        help="Number of workers for DataLoader")
    parser.add_argument('--nn_start_channels', default=16, type=int,
                        help="Number of channels in starting layer.")
    parser.add_argument('--nn_num_layers', default=4, type=int,
                        help="Number of up/down conv layers.")
    parser.add_argument('--upsample_op', default='bilinear', help="Upsampling op.")
    parser.add_argument('--downsample_op', default='strided_conv', help="Downsampling op.")
    parser.add_argument('--unet', dest='unet', action='store_true',
                        help="Whether to use lateral connections a la UNet.")
    parser.add_argument('--output_activation', default='sigmoid',
                        help="Activation function applied to outputs.")
    parser.add_argument('--logdir', help="Directory to which to log outputs.")
    parser.add_argument('--loss_function', default='mse', help="Final layer loss function.")
    parser.add_argument('--resize', help="Resize inputs to height,width.")
    parser.set_defaults(preload_imgs=False, unet=False)
    args = parser.parse_args()

    device = torch.device("cuda")

    # Create the log directory if it does not already exist. exist_ok=True
    # replaces the original check-then-create (os.path.exists + makedirs),
    # which was racy and whose comment contradicted its behavior.
    os.makedirs(args.logdir, exist_ok=True)

    from torchvision import transforms
    transform_list = [transforms.ToTensor()]
    if args.resize:
        height, width = map(int, args.resize.split(','))
        # Resize operates on PIL images, so it must precede ToTensor.
        transform_list = [transforms.Resize((height, width))] + transform_list

    if args.output_activation == 'sigmoid':
        # torch.sigmoid/torch.tanh replace the deprecated F.sigmoid/F.tanh aliases.
        output_activation = torch.sigmoid
        # Sigmoid outputs already live in [0, 1]; no pixel remapping needed.
        preprocess_fn = lambda x: x
        postprocess_fn = lambda x: x
    elif args.output_activation == 'tanh':
        output_activation = torch.tanh
        # Tanh outputs live in [-1, 1]; map pixels into and back out of that range.
        preprocess_fn = lambda x: 2 * (x - 0.5)
        postprocess_fn = lambda x: 0.5 * (x + 1)
    else:
        print("Output activation function must be in {sigmoid, tanh}.")
        sys.exit(1)

    if args.loss_function == 'mse':
        loss_function = F.mse_loss
    elif args.loss_function == 'l1':
        loss_function = F.l1_loss
    else:
        print("Loss function must be in {mse, l1}.")
        sys.exit(1)

    transform_list.append(transforms.Lambda(preprocess_fn))
    transform_fn = transforms.Compose(transform_list)

    video_paths = sorted(glob.glob(os.path.join(args.video_dir, '*')))
    # Materialize as a list: the original lazy `map` iterator would be
    # exhausted after a single pass, breaking any consumer that iterates the
    # videos more than once (e.g. multi-epoch training).
    videos = [
        Video(path, args.interval, args.middle_interval, args.preload_imgs,
              transform_fn=transform_fn)
        for path in video_paths
    ]

    model = Net(device, args.nn_num_layers, args.nn_start_channels,
                args.upsample_op, args.downsample_op, args.unet,
                output_activation, loss_function).to(device)

    if args.mode == 'train':
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        video_dataset = VideoDataset(videos, args.batch_size, shuffle=True,
                                     num_workers=args.num_workers)
        train(args, model, device, optimizer, video_dataset)
        torch.save(model.state_dict(), args.model_path)
    elif args.mode == 'eval':
        model.load_state_dict(torch.load(args.model_path))
        video_dataset = VideoDataset(videos, batch_size=1, shuffle=False,
                                     num_workers=args.num_workers)
        evaluate(args, model, device, video_dataset, postprocess_fn)
    else:
        # The original silently did nothing on an unrecognized mode; fail
        # loudly, consistent with the other option-validation branches.
        print("Mode must be in {train, eval}.")
        sys.exit(1)