コード例 #1
0
 def init_services(self):
     '''
     Initializes music service structures.

     The local service is always attempted; the radio service is only
     registered when an X11 display is available, because remote services
     constantly log errors without X11 (although they appear to work).

     :return: list of Playback devices, one per music service available
         to the Pi
     '''
     services = []
     # attempt to initialize local service. If it fails, don't load that
     # service
     try:
         cachePath = self.run_dir + ".mood_switch_cache/"
         # make directory if missing
         if not os.path.exists(cachePath):
             os.makedirs(cachePath)
         # make services
         local_service = LocalService(self.run_dir + "local_music/")
         radio_service = RadioService()
         # init a single player for all music services
         self.player = Playback.constructPlayer()
         # add services
         services.append(Playback(self.player, local_service, cachePath))
         # PEP 8: compare against None with `is not`, not `!=`
         if os.environ.get("DISPLAY") is not None:
             services.append(Playback(self.player, radio_service,
                                      cachePath))
     except ServiceException:
         # missing local music is non-fatal; that service is simply skipped
         print("Warning: No local music found")
     return services
コード例 #2
0
 def init_services(self):
     '''
     Build the Playback structures for every usable music service.

     :return: list of Playback devices representing each music service
         available to the Pi
     '''
     devices = []
     # If the local service cannot be initialized, skip loading it.
     try:
         cache_dir = self.run_dir + ".mood_switch_cache/"
         # First run: the cache directory may not exist yet.
         if not os.path.exists(cache_dir):
             os.makedirs(cache_dir)
         # Concrete service backends.
         local = LocalService(self.run_dir + "local_music/")
         radio = RadioService()
         # One shared player drives every service.
         self.player = Playback.constructPlayer()
         devices.append(Playback(self.player, local, cache_dir))
         # Remote services (e.g. radio) flood the log with errors when no
         # X11 display exists, so only register them when DISPLAY is set.
         if os.environ.get("DISPLAY") is not None:
             devices.append(Playback(self.player, radio, cache_dir))
     except ServiceException:
         print("Warning: No local music found")
     return devices
コード例 #3
0
ファイル: main.py プロジェクト: varunm22/acahero
 def toggleSong(self, song):
     """Toggle an existing song's playback, or create its widget on first use."""
     if song not in self.songs:
         # First request for this song: build and register its widget.
         widget = Playback(song)
         self.songs[song] = widget
         self.add_widget(widget)
     else:
         self.songs[song].toggle()
コード例 #4
0
 def __init__(self):
     """Load motors, expressions and animations from the ROS parameter
     server and create one command publisher per motor topic.
     """
     # Wait for motors to be loaded in param server
     time.sleep(3)
     robot_name = rospy.get_param('/robot_name')
     if robot_name:
         motors = rospy.get_param('/' + robot_name + '/motors')
     else:
         motors = rospy.get_param('motors')
     expressions = rospy.get_param('expressions',{})
     # Each param entry is a single-key dict; flatten into an ordered
     # name -> spec mapping (Python 2: dict.keys()/values() are lists).
     expressions = OrderedDict((v.keys()[0],v.values()[0]) for k,v in enumerate(expressions))
     #Expressions to motors mapping
     self.faces = FaceExpr.FaceExprMotors.from_expr_yaml(expressions, to_dict(motors, "name"))
     # Animation objects
     animations = rospy.get_param('animations',{})
     animations = OrderedDict((v.keys()[0],v.values()[0]) for k,v in enumerate(animations))
     self.animations = Animation.from_yaml(animations)
     # Motor commands will be sent to this publisher.
     self.publishers = {}
     # NOTE(review): `self.publisher` (singular) is never assigned in this
     # constructor -- possibly a typo for `self.publishers`; confirm
     # against the rest of the class before changing.
     self.playback = Playback(to_dict(motors, "name"), self.publisher)
     # Subscribe motor topics based on their type
     for m in motors:
         if not m['topic'] in self.publishers.keys():
             # Pololu motor if motor_id is specified
             if m['hardware'] == 'pololu':
                 self.publishers[m['topic']] = rospy.Publisher(m['topic']+"/command",MotorCommand, queue_size=30)
             else:
                 self.publishers[m['topic']] = rospy.Publisher(m['topic']+"_controller/command",Float64, queue_size=30)
コード例 #5
0
ファイル: head_ctrl.py プロジェクト: red-five-bot/HEAD
 def __init__(self):
     """Load motors, expressions, animations and animation channels from
     the ROS parameter server, then create one publisher per motor topic.
     """
     # Wait for motors to be loaded in param server
     time.sleep(3)
     robot_name = rospy.get_param('/robot_name')
     if robot_name:
         motors = rospy.get_param('/' + robot_name + '/motors')
     else:
         motors = rospy.get_param('motors')
     expressions = rospy.get_param('expressions', {})
     # Flatten the list of single-key dicts into an ordered name -> spec
     # mapping (Python 2: dict.keys()/values() are subscriptable lists).
     expressions = OrderedDict(
         (v.keys()[0], v.values()[0]) for k, v in enumerate(expressions))
     #Expressions to motors mapping
     self.faces = FaceExpr.FaceExprMotors.from_expr_yaml(
         expressions, to_dict(motors, "name"))
     # Animation objects
     animations = rospy.get_param('animations', {})
     animations = OrderedDict(
         (v.keys()[0], v.values()[0]) for k, v in enumerate(animations))
     self.animations = Animation.from_yaml(animations)
     # Motor commands will be sent to this publisher.
     self.publishers = {}
     # Prevents from playing two animations with same prefix
     # For example Left and Right arms can be played at same time but not two animations for same arm.
     # loaded from param server in robot config
     self.animationChannels = rospy.get_param('kf_anim_channels', [])
     # NOTE(review): `self.publisher` (singular) is never assigned in this
     # constructor -- possibly a typo for `self.publishers`; confirm
     # against the rest of the class before changing.
     self.playback = Playback(to_dict(motors, "name"), self.publisher,
                              self.animationChannels)
     # Create motor publishers by robot names
     for m in motors:
         if not m['topic'] in self.publishers.keys():
             # Pololu motor if motor_id is specified
             if m['hardware'] == 'pololu':
                 self.publishers[m['topic']] = rospy.Publisher(
                     m['topic'] + "/command", MotorCommand, queue_size=30)
             else:
                 self.publishers[m['topic']] = rospy.Publisher(
                     m['topic'] + "_controller/command",
                     Float64,
                     queue_size=30)
コード例 #6
0
def main():
    """Build the merged Playback and the HspApp, then drive both
    concurrently in a single asyncio event loop.
    """

    ###################################################
    # Playback: one object per configured history file,
    # merged into a single manual-mode playback.
    ###################################################
    history_files = parseconfig("histfile_list")

    per_file = [Playback(path, hint) for path, hint in history_files.items()]

    merged = merge_history(per_file)
    merged.playback_mode = "MANUAL"

    ###################################################
    # HspApp wraps the merged playback.
    ###################################################
    hspApp = HspApp(merged, SAVE_LOCATION)

    ###################################################
    # Async loop: run all coroutines side by side.
    ###################################################
    loop = asyncio.get_event_loop()
    use_asyncio_event_loop()
    try:
        # Run command_loop and hspApp.run_async next to each other
        # future: handle when one completes before the other
        loop.run_until_complete(
            asyncio.gather(
                hspApp.command_loop(),
                hspApp.run_async().to_asyncio_future(),
                hspApp.playback.run_async(),
                hspApp.redraw_timer(),
            ))
    finally:
        loop.close()
コード例 #7
0
    def __init__(self, params):
        """Kodi addon dispatcher for the xtheatre.net site.

        Dispatches on ``params['mode']``:

        * ``'main'``       -- build the static root menu.
        * ``'categories'`` -- scrape and list site categories.
        * ``'list'``       -- scrape one page of video listings plus
          previous/next/goto pagination entries.
        * ``'play'``       -- extract embedded player URLs from a video
          page and hand them to Playback for source selection.

        :param params: dict of request parameters; ``'mode'`` is required,
            other keys (``'url'``, ``'content'``, ...) depend on the mode.
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://xtheatre.net/'
        search_url = home_url + '?s='
        # Links that appear in scraped markup but are not real listings.
        false_positives = [
            'http://watchxxxhd.net/watch-full-movies-hd/',
            'http://watchxxxhd.net', 'http://watchxxxhd.net/category/movies/',
            'http://watchxxxhd.net/category/ategorized222/',
            'http://watchxxxhd.net/watch-full-movies-hd/'
        ]

        # Root menu: all videos, categories, search, plus shared entries.
        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url + '?filtre=date&cat=0',
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url + 'categories/',
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        # Category listing: one entry per non-empty category.
        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'listing-cat'}))
            item_list = []
            if soup:
                for item in soup.findAll('li'):
                    if item:
                        if item.a.get('href') not in false_positives:
                            # Video count badge; categories with 0 videos
                            # are skipped below.
                            try:
                                vidcount = item.findAll(
                                    'span',
                                    {'class': 'nb_cat border-radius-5'
                                     })[0].string.encode('UTF-8')
                                vidcount = re.sub('\svideo[s]*', '', vidcount)
                            except:
                                vidcount = '0'
                            if vidcount and vidcount != '0':
                                img = item.find('img')
                                # Prefer the lazy-load attribute, fall back
                                # to plain src, then empty string.
                                if img:
                                    try:
                                        img = img.get('data-lazy-src')
                                    except:
                                        try:
                                            img = img.get('src')
                                        except:
                                            img = ''
                                if not img:
                                    img = ''
                                title = item.a.get('title').encode(
                                    'UTF-8') + ' (%s)' % vidcount
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.a.get('href'),
                                    'content':
                                    '',
                                    'title':
                                    title,
                                    'cover_url':
                                    a.image(img, image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])

            a.add_items(item_list)
            a.end_of_directory()

        # Video listing: search/goto rewrite the URL first, then the page
        # is scraped for items and pagination links.
        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                # Jump straight to a user-chosen page number, bounded by
                # the last page parsed from the current URL.
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'ul', {'class': 'listing-videos listing-extract'}))
            item_list = []
            # Listed entries will be played when selected.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll(
                        'li', {'class': 'border-radius-5 box-shadow'}):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item.a.get('href')
                            _dict['title'] = item.a.get('title').encode(
                                'UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            img = item.find('img')
                            # Same lazy-src / src / '' fallback as above.
                            if img:
                                try:
                                    img = img.get('data-lazy-src')
                                except:
                                    try:
                                        img = img.get('src')
                                    except:
                                        img = ''
                            if not img:
                                img = ''
                            _dict['cover_url'] = a.image(img)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            plot = item.find('div', {'class': 'right'})
                            if plot:
                                plot = plot.p.contents[0].encode('utf-8')
                                _dict['plot'] = plot
                                _dict['plotoutline'] = plot
                            item_list.extend([_dict])
            # Pagination: find last / previous / next page links.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'pagination'}))
            last_item = False
            if soup:
                for item in soup.findAll('a'):
                    if (item.string.encode('UTF-8')
                            == 'Last »') or (item.get('class') == 'last'):
                        last_item = item.get('href')
                        break
                if last_item is False:
                    # No explicit "last" link: take the final inactive one.
                    for last_item in soup.findAll('a', {'class': 'inactive'}):
                        pass
                    if last_item: last_item = last_item.get('href')
                item = soup.find('span', {'class': 'current'})
                if item:
                    if item.parent:
                        item = item.parent
                        if item.previousSibling:
                            if item.previousSibling.find('a'):
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.previousSibling.a.get('href'),
                                    'content':
                                    params['content'],
                                    'title':
                                    a.language(30017, True),
                                    'cover_url':
                                    a.image('previous.png', image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
                        if item.nextSibling:
                            if item.nextSibling.find('a'):
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.nextSibling.a.get('href'),
                                    'content':
                                    params['content'],
                                    'title':
                                    a.language(30018, True),
                                    'cover_url':
                                    a.image('next.png', image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
            if last_item:
                item_list.extend([{
                    'site': site,
                    'mode': 'list',
                    'url': last_item,
                    'content': 'goto',
                    'title': a.language(30019, True),
                    'cover_url': a.image('goto.png', image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])

            a.add_items(item_list)
            a.end_of_directory()

        # Play: collect candidate embed URLs from scripts and iframes,
        # then let Playback choose among them.
        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'video-embed'}))
            item_list = []
            if soup:
                # Regex matches <script> tag names padded with underscores
                # (an obfuscation trick used by the site).
                for script in soup.findAll(re.compile('s_*c_*r_*i_*p_*t')):
                    item = ''
                    if script.get('src'):
                        if 'http://videomega.tv/validatehash.php' in script[
                                'src']:
                            item = script['src']
                        elif 'ref=' in script.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             script.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(
                                    1)
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                if soup.find('iframe', src=True):
                    item = ''
                    for iframe in soup.findAll('iframe', src=True):
                        if iframe.get('data-lazy-src'):
                            item = iframe.get('data-lazy-src')
                            r = re.search('.+old=(.+)$', item)
                            if r:
                                item = r.group(1)
                        else:
                            item = iframe.get('src').replace('\\', '')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            # Secondary location: iframes inside the video-infos block.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'video-infos'}))
            if soup:
                item = ''
                for p in soup.findAll('p'):
                    if p.iframe:
                        item = p.iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                # Nothing playable found: notify the user silently.
                a.alert(a.language(30904, True), sound=False)
コード例 #8
0
ファイル: spihound.py プロジェクト: avishyc/spihound
    df = DataFrame(mapcsv)

    low_addr = int(min(df['Address']), base=16)
    high_addr = int(max(df['Address']), base=16)
    max_time = max(df['Timestamp(ms)'])

    saddr = args.saddr if args.saddr and args.saddr >= low_addr else low_addr
    eaddr = args.eaddr if args.eaddr and args.eaddr <= high_addr else high_addr
    stime = args.stime if args.stime and args.stime >= 0 else 0  # Time always starts a 0
    etime = args.etime if args.etime and args.etime <= max_time else max_time

    dff = df[(df.Address >= hex(saddr)) & (df.Address <= hex(eaddr)) &
             (df['Timestamp(ms)'] >= stime) &
             (df['Timestamp(ms)'] <= etime)].reset_index(drop=True)

    playback = Playback(dff, args.mapfile, saddr, eaddr, stime, etime)
    playback.play()

# --------------------------------------------------------------------------- #
elif args.command == 'analyze':
    mapcsv = read_csv(args.mapfile)
    df = DataFrame(mapcsv)

    high_addr = int(max(df['Address']), base=16)

    if (args.blocksize < 1) or (args.blocksize > high_addr):
        print('Please provide a valid block size: [1, {0}]'.format(high_addr +
                                                                   1))
        exit()

    iblocks = args.iblocks if args.iblocks else None
コード例 #9
0
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://pornhardx.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'video/'
        search_url = home_url + '?s='
        false_positives = [
            'http://pornhardx.com/video',
            'http://pornhardx.com/video/?order=viewed',
            'http://pornhardx.com/video/?order=liked', 'http://pornhardx.com/'
        ]

        if mode == 'main':
            item_list = []
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'navigation-wrapper'}))
            item_list = []
            if soup:
                for item in soup.findAll('a', {'href': True}):
                    if item:
                        if item.get('href') not in false_positives:
                            if 'full-movie' in params['url']:
                                if movies_url != item.get(
                                        'href') and 'full-movie' in item.get(
                                            'href'):
                                    item_list.extend([{
                                        'site':
                                        site,
                                        'mode':
                                        'list',
                                        'url':
                                        item.get('href'),
                                        'content':
                                        '',
                                        'title':
                                        item.contents[0].encode('UTF-8'),
                                        'cover_url':
                                        a.image(image, image),
                                        'backdrop_url':
                                        a.art(),
                                        'type':
                                        3
                                    }])
                            elif 'full-movie' not in item.get('href'):
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    item.get('href'),
                                    'content':
                                    '',
                                    'title':
                                    item.contents[0].encode('UTF-8'),
                                    'cover_url':
                                    a.image(image, image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'div',
                    {'class': re.compile('col-sm-8(?:\s*main-content)*')}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll(
                        'div',
                    {
                        'class':
                        re.compile(
                            '.*(?:col-xs-6 item|post type-post status-publish).*'
                        )
                    }):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            if 'full-movie' not in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            if item.h3:
                                _dict['url'] = item.h3.a.get('href')
                                if item.h3.a.contents:
                                    _dict['title'] = item.h3.a.contents[
                                        0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            elif item.h2:
                                _dict['url'] = item.h2.a.get('href')
                                if item.h2.a.contents:
                                    _dict['title'] = item.h2.a.contents[
                                        0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site

                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'pagination'}))
            if soup.li:
                item = soup.find('a', {'class': 'prev page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', {'class': 'next page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                    if len(soup.findAll('a')) > 2:
                        last_item = soup.find('a', {
                            'class': 'next page-numbers'
                        }).parent.previousSibling.a.get('href')
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': last_item,
                            'content': 'goto',
                            'title': a.language(30019, True),
                            'cover_url': a.image(image, image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                else:
                    item = soup.find('span', {'class': 'page-numbers current'})
                    if item:
                        if len(soup.findAll('a')) > 2:
                            last_item = soup.find(
                                'span', {
                                    'class': 'page-numbers current'
                                }).parent.previousSibling.a.get('href')
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                last_item,
                                'content':
                                'goto',
                                'title':
                                a.language(30019, True),
                                'cover_url':
                                a.image('goto.png', image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])
            else:
                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'ul', {'class': 'pager'}))
                item = soup.find('li', {'class': 'previous'})
                if item:
                    item_list.extend([{
                        'site':
                        site,
                        'mode':
                        'list',
                        'url':
                        item.previousSibling.get('href'),
                        'content':
                        params['content'],
                        'title':
                        a.language(30017, True),
                        'cover_url':
                        a.image('previous.png', image),
                        'backdrop_url':
                        a.art(),
                        'type':
                        3
                    }])
                item = soup.find('li', {'class': 'next'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.previousSibling.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item = ''
            item_list = []
            if soup:
                for item in soup.findAll('param', {'name': 'FlashVars'}):
                    item = item.get('value')
                    item = re.search('.*?proxy\.link=(.+?)&(?:proxy|skin).*?',
                                     item)
                    if item:
                        if item not in item_list:
                            item = item.group(1)
                        else:
                            item = ''
                    else:
                        item = ''
                    xbmcdict = XBMCDict(0).update(params)
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
                item = ''
                for item in soup.findAll('video'):
                    for source in soup.findAll('source'):
                        src = source.get('src')
                        if src:
                            xbmcdict = XBMCDict(0).update(params)
                            if item and ('..' not in src):
                                _dict = xbmcdict.copy()
                                try:
                                    _dict['src_title'] = source.get(
                                        'data-res') + 'p'
                                except:
                                    pass
                                _dict['url'] = src
                                item_list.extend([_dict])
                    try:
                        src = item.get('src')
                        if src:
                            xbmcdict = XBMCDict(0).update(params)
                            if item and ('..' not in src):
                                _dict = xbmcdict.copy()
                                try:
                                    _dict['src_title'] = source.get(
                                        'data-res') + 'p'
                                except:
                                    pass
                                _dict['url'] = src
                                item_list.extend([_dict])
                    except:
                        pass
                for script in soup.findAll('script'):
                    item = ''
                    if script.get('src'):
                        if 'http://videomega.tv/validatehash.php' in script[
                                'src']:
                            item = script['src']
                        elif 'ref=' in script.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             script.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(
                                    1)
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                for iframe in soup.findAll('iframe'):
                    item = ''
                    if iframe.get('src'):
                        if 'http://videomega.tv/validatehash.php' in iframe[
                                'src']:
                            item = iframe['src']
                        elif 'ref=' in iframe.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             iframe.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(
                                    1)
                        else:
                            item = iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])

            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
コード例 #10
0
def playback(data, name=False, context=False):
    """Wrap *data* in a Playback object and hand it to items.play_item.

    :param data: payload forwarded to the Playback constructor
    :param name: passed through to items.play_item unchanged
    :param context: passed through to items.play_item unchanged
    """
    from playback import Playback

    player = Playback(data)
    items.play_item(player, name, context)
コード例 #11
0
                          resample=resample_t,
                          point_mode=point_finding_mode)
        mt.run(show=show, wait=ms_bt_frames, start_pos=start_position)
    if actions in ['mark', 'both']:
        print >> logfile, "Marking %s #%i" % (mouse, n)
        logfile.flush()
        m = Marker(mouse=mouse, n=n, data_dir=data_dir, mark_mode=mark_mode)
        mres = m.run(resample=resample_m, start_time=start_time)
        if not mres[0]:
            print >> logfile, "WARNING: Marking for %s #%i declared a warning: %s" % (
                mouse, n, mres[1])
            logfile.flush()
    if actions == 'play':
        print >> logfile, "Playing %s #%i" % (mouse, n)
        logfile.flush()
        pb = Playback(ymaze_path=mouse, ymaze_n=n, data_dir=data_dir)
        pb.play(draw=show_tracking)

elif mode == 'collect':
    mice = [
        m for m in os.listdir(data_dir)
        if exclude_word not in m.lower() and m[0] != '.' and 'summary' not in m
    ]

    rows = []
    for mouse in mice:
        print >> logfile, mouse
        logfile.flush()
        fh = FileHandler(data_dir, mouse, n=1)
        for tr in xrange(fh.get_n_trials_wbehav()):
            fhm = FileHandler(data_dir, mouse, n=tr + 1)
コード例 #12
0
    def __init__(self, params):
        """Directory/playback handler for the qwertty.net site module.

        Dispatches on params['mode']:
          * 'main'       -- build the top-level menu for this site
          * 'categories' -- list the site's navigation categories
          * 'list'       -- list videos; 'content' == 'search' prompts for a
                            query, 'content' == 'goto' prompts for a page number
          * 'play'       -- resolve a video page to a playable source

        :param params: routing dict; must contain 'mode' and, for most modes,
            'url'.  Mutated in place by the 'list' branch.
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://qwertty.net'
        # DLE search endpoint; search_start/result_from are rewritten for paging.
        search_url = home_url + '/index.php?do=search&subaction=search&full_search=0&search_start=0&result_from=1&story='
        # Kept for parity with sibling site modules; not referenced here.
        false_positives = ['']

        if mode == 'main':
            # NOTE(review): `image` is presumably a module-level constant with
            # this site's icon path -- it is not defined in this method.
            item_list = [{'site': site, 'mode': 'list', 'title': a.language(30006), 'content': '',
                          'url': home_url, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                         {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
                          'url': home_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                          'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(),
                          'type': 3}]
            item_list.extend(a.favs_hist_menu(site))
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'navi-wrap'}))
            item_list = []
            if soup:
                # Every anchor inside the nav wrap is a category link.
                for item in soup.findAll('a'):
                    if item: item_list.extend([{'site': site, 'mode': 'list', 'url': home_url + item.get('href'),
                                                'content': '', 'title': item.string.encode('UTF-8'),
                                                'cover_url': a.image(image, image), 'backdrop_url': a.art(),
                                                'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                # Determine the current page so page_input() can cap the prompt.
                if 'do=search' in params['url']:
                    last_item = re.search('search_start=([0-9]+)', params['url'])
                else:
                    last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    if 'do=search' in params['url']:
                        # Search paging: each page advances result_from by 10.
                        page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + str(item), params['url'])
                        params['url'] = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(str(item)) * 10 + 1), page)
                    else:
                        params['url'] = re.sub('/page/[0-9]+/', '/page/' + str(item) + '/', params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'id': 'dle-content'}))
            item_list = []
            # Each listed item routes straight to 'play' when selected.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'short-item'}):
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item.a.get('href')
                        _dict['title'] = item.a.img.get('alt').encode('UTF-8')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        # Strip '/thumbs' to get the full-size cover image.
                        item = home_url + item.a.img.get('src').replace('/thumbs', '')
                        _dict['cover_url'] = a.image(item)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['sub_site'] = site

                        item_list.extend([_dict])
            # Pagination: scan the bottom nav for Back/Next/last-page anchors.
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'bottom-nav'}))
            if soup:
                last_item = len(soup.findAll('a', href=True)) - 1
                for index, item in enumerate(soup.findAll('a', href=True)):
                    page = ''
                    if item:
                        # No leading 'Back' anchor means the last real anchor
                        # sits one position earlier.
                        if index == 0 and item.string.encode('UTF-8') != 'Back': last_item -= 1
                        if item.string.encode('UTF-8') == 'Back':
                            if item.get('href') == '#':
                                # JS-driven paging: extract the target offset
                                # from the onclick handler.
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        if item.string.encode('UTF-8') == 'Next':
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        if index == last_item:
                            # Last anchor in the nav is the final page -> 'Go to page' entry.
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend([{'site': site, 'mode': 'list', 'url': page, 'content': 'goto',
                                                   'title': a.language(30019, True),
                                                   'cover_url': a.image('goto.png', image),
                                                   'backdrop_url': a.art(), 'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'full-text clearfix desc-text'}))
            item = soup.find('a')
            item_list = []
            xbmcdict = XBMCDict(0).update(params)
            if item:
                _dict = xbmcdict.copy()
                _dict['url'] = item.get('href')
                item_list.extend([_dict])
            # BUGFIX: an alert used to fire here (in an else-branch) AND again
            # below when no link was found, showing the same 30904 message
            # twice.  Alert only once, from the final check.
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
コード例 #13
0
    def __init__(self, params):
        """Directory/playback handler for the filmikz.ch site module.

        Dispatches on params['mode']:
          * 'main' -- build the top-level menu
          * 'list' -- list videos; 'content' == 'search' prompts for a query,
                      'content' == 'goto' prompts for a page number
          * 'play' -- resolve a video page into single-file and multi-part
                      sources and hand them to Playback

        :param params: routing dict; must contain 'mode' and, for most modes,
            'url'.  Mutated in place by the 'list' branch.
        """
        import re
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'http://filmikz.ch'
        home_url = base_url + '/index.php?genre=14'
        search_url = home_url + '&search='
        # Kept for parity with sibling site modules; not referenced here.
        false_positives = ['#']

        if mode == 'main':
            # NOTE(review): `image` is presumably a module-level constant with
            # this site's icon path -- it is not defined in this method.
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                # The site pages in steps of 10 ('pg=' offset), so convert the
                # offset to a 1-based page count for the prompt and back again.
                last_item = re.search('pg=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                last_item = int(last_item / 10)
                item = a.page_input(last_item)
                if item:
                    item = str(int(item) * 10)
                    params['url'] = re.sub('pg=[0-9]+', 'pg=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            # The listing lives inside a fixed-width table cell.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'td', {'width': '490'}))
            item_list = []
            # Each listed item routes straight to 'play' when selected.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            params['sub_site'] = site
            if soup:
                addondict = AddonDict(0).update(params)
                # One 100%x155 table per video entry.
                for item in soup.findAll('table', {
                        'width': '100%',
                        'height': '155'
                }):
                    _dict = addondict.copy()
                    ahref = item.find('a', {'href': True})
                    if ahref:
                        url = ahref.get('href')
                        if not url.startswith('http://'):
                            url = base_url + url
                        _dict['url'] = url
                        data = item.find('strong')
                        # Titles end with a ' XXX :' suffix; strip it.
                        _dict['title'] = str(data.contents[0]).rstrip(' XXX :')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        img = item.find('img')
                        if img:
                            img = img.get('src')
                            if not img.startswith('http://'):
                                img = base_url + '/' + img
                        else:
                            img = ''
                        _dict['cover_url'] = a.image(img)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        # Pull the cast list out of the 'Starring: ...' blurb.
                        cast = item.find('p',
                                         text=re.compile('[Ss]tarring:.+'))
                        if cast:
                            _dict['plot'] = str(cast)
                            _dict['plotoutline'] = _dict['plot']
                            cast = re.search('[Ss]tarring:\s*(.+?)\s*\.+',
                                             str(cast))
                            if cast:
                                cast = cast.group(1)
                                _dict['cast'] = cast.split(', ')
                        item_list.extend([_dict])
                # Pagination: the pager is a 250px-wide table whose anchors
                # are identified by their literal arrow/label text.
                pages = BeautifulSoup(html,
                                      parseOnlyThese=SoupStrainer(
                                          'table', {'width': '250'}))
                if pages:
                    previouspage = None
                    nextpage = None
                    lastpage = None
                    for ahref in pages.findAll('a', {'href': True}):
                        astr = ahref.string.encode('utf-8')
                        if astr == '‹‹ ':
                            previouspage = base_url + '/' + ahref.get('href')
                        elif astr == '››':
                            nextpage = base_url + '/' + ahref.get('href')
                        elif astr == ' Last ':
                            lastpage = base_url + '/' + ahref.get('href')
                            # Suppress the 'go to page' entry when there is
                            # only a single page of results.
                            last_item = re.search('pg=(-*[0-9]+)',
                                                  str(lastpage))
                            if last_item:
                                last_item = int(last_item.group(1))
                                if last_item < 10:
                                    lastpage = None
                    if previouspage:
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            previouspage,
                            'content':
                            params['content'],
                            'title':
                            a.language(30017, True),
                            'cover_url':
                            a.image('previous.png', image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])
                    if nextpage:
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            nextpage,
                            'content':
                            params['content'],
                            'title':
                            a.language(30018, True),
                            'cover_url':
                            a.image('next.png', image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])
                    if lastpage:
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            lastpage,
                            'content':
                            'goto',
                            'title':
                            a.language(30019, True),
                            'cover_url':
                            a.image('goto.png', image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item_list = []
            _bad_hosts = ['NowDownload', 'ePornik']
            if soup:
                # Each hoster is exposed as an <input type="button"> whose
                # onclick opens a popUp() with the mirror page.
                buttons = soup.findAll('input', {
                    'type': 'button',
                    'onclick': True
                })
                if buttons:
                    addondict = AddonDict(0).update(params)
                    # Pass 1: single-file sources (buttons NOT labelled 'Part N').
                    for button in buttons:
                        value = button.get('value')
                        # Button labels look like '<something>-<HostName>'.
                        newhost = re.search('.+?-([a-zA-Z]+)', value)
                        if newhost:
                            newhost = newhost.group(1)
                        else:
                            newhost = ''
                        if newhost not in _bad_hosts:
                            item = button.get('onclick')
                            # Unwrap javascript:popUp('...') to the raw path.
                            item = re.sub(
                                'javascript:popUp\([\'"](.+?)[\'"]\);*',
                                '\g<01>', item)
                            item = base_url + item
                            value = button.get('value')
                            if not re.search('[Pp]art ', value):
                                try:
                                    # The mirror page embeds the real source
                                    # in a <frame>; skip ad frames.
                                    thtml = a.get_page(item)
                                    tsoup = BeautifulSoup(thtml)
                                    source = tsoup.find('frame')
                                    if source:
                                        source = source.get('src')
                                        if 'ads.php' not in source:
                                            _dict = addondict.copy()
                                            _dict['url'] = source
                                            item_list.extend([_dict])
                                except:
                                    continue
                    # Pass 2: multi-part sources -- group consecutive 'Part N'
                    # buttons by host into one item carrying a 'parts' list.
                    parts = []
                    oldhost = ''
                    _dict = addondict.copy()
                    _dict['multi-part'] = True
                    for button in buttons:
                        value = button.get('value')
                        newhost = re.search('.+?-([a-zA-Z]+)', value)
                        if newhost:
                            newhost = newhost.group(1)
                        else:
                            newhost = ''
                        if newhost not in _bad_hosts:
                            item = button.get('onclick')
                            item = re.sub(
                                'javascript:popUp\([\'"](.+?)[\'"]\);*',
                                '\g<01>', item)
                            item = base_url + item
                            if re.search('[Pp]art ', value):
                                # Host changed: flush the parts collected for
                                # the previous host before starting a new group.
                                if oldhost != newhost:
                                    if oldhost != '':
                                        _dict['parts'] = parts
                                        item_list.extend([_dict])
                                        _dict = addondict.copy()
                                        _dict['multi-part'] = True
                                        parts = []
                                    oldhost = newhost

                                try:
                                    thtml = a.get_page(item)
                                    tsoup = BeautifulSoup(thtml)
                                    source = tsoup.find('frame')
                                    if source:
                                        source = source.get('src')
                                        if 'ads.php' not in source:
                                            parts.extend([source])
                                except:
                                    continue
                    # Flush the final host group, if any.
                    if parts:
                        _dict['parts'] = parts
                        item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
コード例 #14
0
ファイル: xvx.py プロジェクト: yam4me/repository.openeleq
    def __init__(self, params):
        """Kodi list/play handler for the yespornplease.com site module.

        Dispatches on ``params['mode']``:

        * ``'main'``       -- build the static root menu.
        * ``'categories'`` -- scrape and list the site's category links.
        * ``'list'``       -- list videos for a URL (handles the 'search'
          and 'goto' content types) plus prev/next/goto pagination entries.
        * ``'play'``       -- extract the direct video URL and play it.

        :param params: routing dict; must contain 'mode' and, for the
            list/play modes, 'url' (and optionally 'content').
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        # Site endpoints scraped by the modes below.
        base_url = 'http://yespornplease.com'
        home_url = base_url + '/index.php'
        popular_url = base_url + '/index.php?p=1&m=today'
        search_url = base_url + '/search.php?q='
        false_positives = ['']  # NOTE(review): defined but never used in this method

        if mode == 'main':
            # Static root menu: all videos, today's popular, categories, search.
            # NOTE(review): `image` is not defined in this method -- presumably a
            # module-level global naming this site's icon; confirm.
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30016),
                'content': '',
                'url': popular_url,
                'cover_url': a.image('popular.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            # List every category anchor found inside <div id="categories">.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'categories'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            item.get('href').replace(' ', '+'),
                            'content':
                            '',
                            'title':
                            item.string.encode('UTF-8'),
                            'cover_url':
                            a.image(image, image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build the search URL.
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)  # user cancelled the input dialog
            # 'goto' content: prompt for a page number and rewrite p=<n>.
            elif params.get('content', '') == 'goto':
                last_item = re.search('p=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000  # fallback upper bound when no p= in URL
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('p=[0-9]+', 'p=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)  # user cancelled the input dialog
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'videos'}))
            item_list = []
            # Defaults shared by every video item produced below.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                # One listing entry per video preview tile.
                for item in soup.findAll('div', {'class': 'video-preview'}):
                    if item:
                        _dict = xbmcdict.copy()
                        temp = item.find('div', {'class': 'jcarousel'}).a
                        if temp:
                            temp = temp.get('href')
                            if not temp.startswith('http://'):
                                temp = base_url + temp
                            _dict['url'] = temp
                            _dict['title'] = item.find('div', {
                                'class': 'preview-title'
                            }).get('title').encode('UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            temp = item.find('div', {
                                'class': 'jcarousel'
                            }).img.get('src')
                            # Protocol-relative thumbnail URL -> absolute.
                            if temp.startswith('//'): temp = 'http:' + temp
                            _dict['cover_url'] = a.image(temp)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            temp = item.find('div', {
                                'class': 'preview-info-box length'
                            }).b.string
                            if temp:
                                # Convert "HH:MM:SS" to total seconds.
                                temp = re.search('([0-9]+):([0-9]+):([0-9]+)',
                                                 temp)
                                _dict['duration'] = str(
                                    (int(temp.group(1)) * 60 * 60) +
                                    (int(temp.group(2)) * 60) +
                                    int(temp.group(3)))
                            _dict['sub_site'] = site

                            item_list.extend([_dict])

                # Pagination: previous / next anchors from the page body.
                soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
                if soup.find('a', {'id': 'prev-page'}):
                    item = soup.find('a', {
                        'id': 'prev-page'
                    }).get('href').replace(' ', '+')
                    if not item.startswith('http://'): item = base_url + item
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    item_list.extend([{
                        'site':
                        site,
                        'mode':
                        'list',
                        'url':
                        item,
                        'content':
                        params['content'],
                        'title':
                        a.language(30017, True),
                        'cover_url':
                        a.image('previous.png', image),
                        'backdrop_url':
                        a.art(),
                        'type':
                        3
                    }])
                if soup.find('a', {'id': 'next-page'}):
                    item = soup.find('a', {
                        'id': 'next-page'
                    }).get('href').replace(' ', '+')
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    if not item.startswith('http://'): item = base_url + item
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item,
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

                # "Go to page" entry: the last pagination anchor holds the
                # final page's URL.
                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'div', {'id': 'pagination'}))
                last_item = False
                if soup:
                    for item in reversed(soup.findAll('a')):
                        last_item = item.get('href')
                        if not last_item.startswith('http://'):
                            last_item = base_url + last_item
                        break
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            # Pull the flashvars <param> out of the video <object> and
            # extract its video_url query argument.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'object', {'id': 'videoContainer'}))
            item_list = []
            if soup:
                item = soup.find('param', {'name': 'flashvars'})
                item = re.search('.*?video_url=(.+?)&.*?', str(item))
                if item: item = item.group(1)
                xbmcdict = XBMCDict(0).update(params)
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
                else:
                    a.alert(a.language(30904, True), sound=False)
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Code example #15
0
    elif command == "follow":
        playback.play("follow")
        audio_pub.publish("r2d2")
    elif command == "photo":
        pass
    elif command == "bird":
        audio_pub.publish("r2d2")
        playback.play("bird")
    else:
        current_command = ""


if __name__ == "__main__":
    # Entry point: behavior-coordination ROS node.
    rospy.init_node("behavior", anonymous=True)
    tf_listener = tf.TransformListener()
    playback = Playback()

    # Inputs: text commands, robot joint states, and detected circles.
    rospy.Subscriber("/command", String, command_callback)
    rospy.Subscriber("/joint_states", JointState, js_callback)
    rospy.Subscriber("/opencv_apps/circles", CircleArrayStamped,
                     circle_callback)
    # Outputs: base velocity, joint commands, audio cues, stiffness toggle.
    twist_pub = rospy.Publisher("/turtle1/cmd_vel", Twist, queue_size=10)
    joint_pub = rospy.Publisher("/command/joint_states",
                                JointState,
                                queue_size=10)
    audio_pub = rospy.Publisher('/audio', String, queue_size=10)
    stiffen_pub = rospy.Publisher("/stiffen", Int32, queue_size=1)
    print("PYTHON CODE IS READY")
    rate = rospy.Rate(10)  # NOTE(review): created but unused; loop sleeps 0.1s directly
    # Idle loop: all work happens in the subscriber callbacks above.
    while not rospy.is_shutdown():
        rospy.sleep(0.1)
Code example #16
0
File: __init__.py  Project: bialagary/mw
            common.alert(common.language(30914, True))
    elif param == 'clear_cache':
        try:
            common.clear_cache()
        except:
            common.alert(common.language(30916, True))
    elif param == 'clear_search':
        try:
            common.clear_search(params['sub_site'])
        except:
            common.alert(common.language(30920, True))
    elif param == 'separator':
        pass
    elif param == 'play_this':
        from playback import Playback
        Playback().play_this(params.get('url', ''), params['title'],
                             common.image(params.get('cover_url', '')), False,
                             params)
    else:
        is_allowed_access = False
        if common.is_locked():
            is_allowed_access = common.toggle_lock()
        else:
            is_allowed_access = True
        if is_allowed_access:
            try:
                i = __import__(param, fromlist=[''])
                i.Site(params)
            except ImportError:
                common.error(common.language(30907, True) + ' ' + param)
Code example #17
0
File: audio.py  Project: aregb/cyborg_audio
def main():
    """Start the cyborg_audio ROS node and block until shutdown."""
    rospy.init_node("cyborg_audio")
    # The services register their own subscribers/publishers on construction.
    audio_playback = Playback()
    speech = TextToSpeech()
    rospy.spin()
Code example #18
0
    def __init__(self, params):
        """Kodi list/play handler for the beeg.com JSON API site module.

        Dispatches on ``params['mode']``:

        * ``'main'``       -- build the static root menu.
        * ``'categories'`` -- list the API's popular and non-popular tags.
        * ``'list'``       -- list videos for a URL (handles the 'search'
          and 'goto' content types) plus prev/next/goto pagination entries.
        * ``'play'``       -- pick the best available quality and play it.

        :param params: routing dict; must contain 'mode' and, for the
            list/play modes, 'url' (play also reads 'count' and 'title').
        """
        import re
        import json
        import urllib
        from addon import Addon
        from addondict import AddonDict

        a = Addon()
        site = self.__module__
        mode = params['mode']

        # JSON API endpoints and the thumbnail URL template.
        api_version = 'v5'
        recent_url = 'http://beeg.com/api/%s/index/main/0/pc' % api_version
        long_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=long%svideos' % (api_version, '%20')
        search_url = 'http://beeg.com/api/%s/index/search/0/pc?query=' % api_version
        tag_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=' % api_version
        img_url = 'http://img.beeg.com/236x177/%s.jpg'

        # Substituted for the {DATA_MARKERS} placeholder in stream URLs.
        data_markers = 'data=pc.US'

        if mode == 'main':
            # Static root menu: recent, categories, long videos, search.
            # NOTE(review): `image` is not defined in this method -- presumably a
            # module-level global naming this site's icon; confirm.
            item_list = [{'site': site, 'mode': 'list', 'title': a.language(30003), 'content': '',
                          'url': recent_url, 'cover_url': a.image('recent.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
                          'url': recent_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30039), 'content': '',
                          'url': long_url, 'cover_url': a.image('longvideos.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                          'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(),
                          'type': 3}]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            # The API returns tags split into 'popular' and 'nonpopular';
            # the part of each tag before the first '-' is the URL slug.
            html = a.get_page(params['url'])
            data = json.loads(html)
            item_list = []
            tags = data.get('tags', None)
            if tags:
                popular = tags.get('popular', None)
                if popular:
                    for item in popular:
                        url_item = re.search('(.+?)-', str(item))
                        if url_item: url_item = url_item.group(1)
                        else: url_item = item
                        item_list.extend([{'site': site, 'mode': 'list', 'url': tag_url + url_item,
                                           'content': '', 'title': str(item).capitalize(),
                                           'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
                nonpopular = tags.get('nonpopular', None)
                if nonpopular:
                    for item in nonpopular:
                        url_item = re.search('(.+?)-', str(item))
                        if url_item: url_item = url_item.group(1)
                        else: url_item = item
                        item_list.extend([{'site': site, 'mode': 'list', 'url': tag_url + urllib.quote(url_item),
                                           'content': '', 'title': str(item).capitalize(),
                                           'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build the search URL.
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)  # user cancelled the input dialog
            # 'goto' content: prompt for a page number and rewrite /<n>/pc.
            elif params.get('content', '') == 'goto':
                last_item = re.search('/([0-9]+)/pc', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000  # fallback upper bound when no page in URL
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/[0-9]+/pc', '/' + str(item) + '/pc', params['url']).replace(' ', '+')
                else:
                    exit(1)  # user cancelled the input dialog
            html = a.get_page(params['url'])
            item_list = []
            data = json.loads(html)
            allvideos = []
            videos = data.get('videos', None)
            if videos:
                # First pass: collect (id, display title, raw video) tuples,
                # skipping 'na' placeholder name fields.
                for video in videos:
                    nt_name = video.get('nt_name', '').encode('utf-8', 'ignore')
                    ps_name = video.get('ps_name', '').encode('utf-8', 'ignore')
                    atitle = video.get('title', '').encode('utf-8', 'ignore')
                    vid_id = video.get('id', '').encode('utf-8', 'ignore')
                    if nt_name.lower() == 'na': nt_name = ''
                    if ps_name.lower() == 'na': ps_name = ''
                    atitle = '%s - %s' % (atitle, ps_name)
                    if nt_name:
                        atitle += ' (%s)' % nt_name
                    if vid_id:
                        allvideos.append([vid_id, atitle, video])

                if allvideos:
                    # Defaults shared by every video item produced below.
                    params['mode'] = 'play'
                    params['content'] = 'episodes'
                    params['type'] = 0
                    params['context'] = 0
                    params['duration'] = '480'
                    params['sub_site'] = site
                    addondict = AddonDict(0).update(params)

                    # Second pass: build the listing entries.
                    for number, name, idata in allvideos:
                        _dict = addondict.copy()
                        _dict['title'] = name
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        _dict['cover_url'] = a.image(img_url % number)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['url'] = params['url']
                        _dict['count'] = number
                        item_list.extend([_dict])
                    # Pagination: derive prev/next/last URLs from the
                    # current page index and the API's page count.
                    pages = data.get('pages', 0)
                    if pages != 0:
                        pages -= 1
                    page = re.search('/([0-9]+)/pc', params['url'])
                    if page:
                        page = int(page.group(1))
                    else:
                        page = 0
                    previouspage = None
                    nextpage = None
                    lastpage = None
                    if page > 0:
                        previouspage = re.sub('/[0-9]+/pc', '/' + str(page - 1) + '/pc', params['url'])
                    if pages > 1:
                        lastpage = re.sub('/[0-9]+/pc', '/' + str(pages) + '/pc', params['url'])
                    if page < pages:
                        nextpage = re.sub('/[0-9]+/pc', '/' + str(page + 1) + '/pc', params['url'])

                    if previouspage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': previouspage, 'content': params['content'],
                                           'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if nextpage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': nextpage, 'content': params['content'],
                                           'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if lastpage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': lastpage, 'content': 'goto',
                                           'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            # Re-fetch the listing page and find the video whose id matches
            # params['count'], then pick the best quality available.
            html = a.get_page(params['url'])
            data = json.loads(html)
            video = None
            videos = data.get('videos', None)
            if videos:
                for vid in videos:
                    if vid.get('id', None) == params['count']:
                        video = vid
                        break
                if video:
                    img = img_url % video.get('id')
                    name = params['title']
                    # Quality fallback: 720p -> 480p -> 240p.
                    url = video.get('720p', None)
                    if not url:
                        url = video.get('480p', None)
                        if not url:
                            url = video.get('240p', None)
                    if url:
                        url = 'http:' + re.sub('\{DATA_MARKERS\}', data_markers, url)
                        from playback import Playback
                        Playback().play_this(url, name, img, a.common.usedirsources())
                    else:
                        a.alert(a.language(30904, True), sound=False)
Code example #19
0
File: dazn.py  Project: kiliantv/repo
def play_context(data, title):
    """Wrap *data* in a Playback source and hand it to the item player."""
    from playback import Playback
    source = Playback(data)
    items.play_item(source, name=title, context=True)
Code example #20
0
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'https://chaturbate.com'
        home_url = base_url

        false_positives = ['#']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30021),
                'content': '',
                'url': home_url,
                'cover_url': a.image('featuredcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'bygender',
                'title': a.language(30017),
                'content': '',
                'cover_url': a.image('bygender.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'byage',
                'title': a.language(30018),
                'content': '',
                'cover_url': a.image('byage.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'byregion',
                'title': a.language(30019),
                'content': '',
                'cover_url': a.image('byregion.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'bystatus',
                'title': a.language(30020),
                'content': '',
                'cover_url': a.image('bystatus.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'bygender':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30022),
                'content': '',
                'url': base_url + '/female-cams/',
                'cover_url': a.image('femalecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30023),
                'content': '',
                'url': base_url + '/male-cams/',
                'cover_url': a.image('malecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30024),
                'content': '',
                'url': base_url + '/couple-cams/',
                'cover_url': a.image('couplecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30025),
                'content': '',
                'url': base_url + '/transsexual-cams/',
                'cover_url': a.image('transcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'byage':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30026),
                'content': '',
                'url': base_url + '/teen-cams/',
                'cover_url': a.image('teencams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30027),
                'content': '',
                'url': base_url + '/18to21-cams/',
                'cover_url': a.image('18to21cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30028),
                'content': '',
                'url': base_url + '/20to30-cams/',
                'cover_url': a.image('20to30cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30029),
                'content': '',
                'url': base_url + '/30to50-cams/',
                'cover_url': a.image('30to50cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30030),
                'content': '',
                'url': base_url + '/mature-cams/',
                'cover_url': a.image('maturecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'byregion':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30031),
                'content': '',
                'url': base_url + '/north-american-cams/',
                'cover_url': a.image('north-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30032),
                'content': '',
                'url': base_url + '/other-region-cams/',
                'cover_url': a.image('other-regioncams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30033),
                'content': '',
                'url': base_url + '/euro-russian-cams/',
                'cover_url': a.image('euro-russiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30034),
                'content': '',
                'url': base_url + '/philippines-cams/',
                'cover_url': a.image('philippinescams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30035),
                'content': '',
                'url': base_url + '/asian-cams/',
                'cover_url': a.image('asiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30036),
                'content': '',
                'url': base_url + '/south-american-cams/',
                'cover_url': a.image('south-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'bystatus':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30037),
                'content': '',
                'url': base_url + '/exhibitionist-cams/',
                'cover_url': a.image('exhibitionistcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30038),
                'content': '',
                'url': base_url + '/hd-cams/',
                'cover_url': a.image('hdcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'goto':
                last_item = re.search('page=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('page=[0-9]+', 'page=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': 'c-1 endless_page_template'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'episodes'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = ''
            params['sub_site'] = site
            if soup:
                ul = soup.find('ul', {'class': 'list'})
                if ul:
                    addondict = AddonDict(0).update(params)
                    for item in ul.findAll('li'):
                        _dict = addondict.copy()
                        clip_link = item.find('a')
                        if clip_link:
                            url = clip_link.get('href')
                            if not url.startswith('http://'):
                                url = base_url + url
                            _dict['url'] = url
                            ctitle = ''
                            cage = ''
                            cname = ''
                            ccams = ''
                            details = item.find('div', {'class': 'details'})
                            if details:
                                temp = details.find('a')
                                if temp:
                                    cname = str(temp.contents[0])
                                temp = details.find(
                                    'span', {'class': re.compile('age.*')})
                                if temp:
                                    cage = temp.string.encode('utf-8')
                                temp = details.find('li', {'class': 'cams'})
                                if temp:
                                    ccams = str(temp.contents[0])
                                temp = details.find('li', {'title': True})
                                if temp:
                                    ctitle = temp.get('title').encode('UTF-8')
                            if cname:
                                usetitle = '%s [%syr, %s] %s' % (cname, cage,
                                                                 ccams, ctitle)
                                _dict['title'] = usetitle
                                _dict['tvshowtitle'] = _dict['title']
                                _dict['originaltitle'] = _dict['title']
                                img = item.find('img')
                                if img:
                                    img = img.get('src')
                                    if img.startswith('//'):
                                        img = 'http:' + img
                                else:
                                    img = ''
                                _dict['cover_url'] = a.image(img)
                                _dict['thumb_url'] = _dict['cover_url']
                                _dict['poster'] = _dict['cover_url']
                                item_list.extend([_dict])

                    pages = BeautifulSoup(html,
                                          parseOnlyThese=SoupStrainer(
                                              'ul', {'class': 'paging'}))
                    if pages:
                        previouspage = pages.find(
                            'a', {'class': re.compile('prev.*')})
                        nextpage = pages.find('a',
                                              {'class': re.compile('next.*')})
                        lastpage = pages.find('span',
                                              {'class': 'endless_separator'})
                        if lastpage:
                            lastpage = lastpage.findNext('a')

                        if previouspage:
                            previouspage = previouspage.get('href').replace(
                                ' ', '+')
                            if previouspage != '#':
                                if not previouspage.startswith('http://'):
                                    previouspage = base_url + previouspage
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    previouspage,
                                    'content':
                                    params['content'],
                                    'title':
                                    a.language(30017, True),
                                    'cover_url':
                                    a.image('previous.png', image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
                        if nextpage:
                            nextpage = nextpage.get('href').replace(' ', '+')
                            if nextpage != '#':
                                if not nextpage.startswith('http://'):
                                    nextpage = base_url + nextpage
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    nextpage,
                                    'content':
                                    params['content'],
                                    'title':
                                    a.language(30018, True),
                                    'cover_url':
                                    a.image('next.png', image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])
                        if lastpage:
                            lastpage = lastpage.get('href').replace(' ', '+')
                            if lastpage != '#':
                                if not lastpage.startswith('http://'):
                                    lastpage = base_url + lastpage
                                item_list.extend([{
                                    'site':
                                    site,
                                    'mode':
                                    'list',
                                    'url':
                                    lastpage,
                                    'content':
                                    'goto',
                                    'title':
                                    a.language(30019, True),
                                    'cover_url':
                                    a.image('goto.png', image),
                                    'backdrop_url':
                                    a.art(),
                                    'type':
                                    3
                                }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            link = re.search('html \+= "src=\'(.+?)\'', html)
            if link:
                from playback import Playback
                Playback().play_this(link.group(1), params['title'],
                                     params['cover_url'],
                                     a.common.usedirsources())
            else:
                a.alert(a.language(30904, True), sound=False)
コード例 #21
0
    def __init__(self, params):
        """Dispatch a single plugin request for the playporn.to site.

        ``params['mode']`` selects the branch:
          * 'main'     -- build the static top-level menu (recent / movies /
                          scenes / search plus favourites & extended menus)
          * 'sub'      -- sub-menu for a category root (all / categories)
          * 'category' -- scrape the site's category link list
          * 'list'     -- scrape one listing page, including search/goto
                          input handling and prev/next/last pagination
          * 'play'     -- scrape a video page and hand sources to Playback

        NOTE(review): written for Python 2 / BeautifulSoup 3
        (``parseOnlyThese=SoupStrainer(...)``, ``.encode('UTF-8')`` on
        navigable strings). ``image`` is read from an enclosing scope that
        is not visible here -- presumably the module's default artwork;
        confirm against the module header.
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        # Site id comes from the defining module so favourites/history
        # entries can route back to this handler.
        site = self.__module__
        mode = params['mode']

        # Hard-coded site endpoints.
        home_url = 'http://playporn.to/'
        search_url = home_url + '?submit=Search&s='
        movies_url = home_url + 'category/xxx-movie-stream/'
        scenes_url = home_url + 'category/xxx-clips-scenes-stream/'
        # Pages that match the item selectors but must never be listed.
        false_positives = ['http://playporn.to/deutsche-milfs-anonym-sex/']

        if mode == 'main':
            # Static top-level menu; a.language(...) resolves localized labels.
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'sub':
            # Sub-menu for a category root: "all items" plus "categories".
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': params['url'],
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'category',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'category':
            # The nav bar holds two <ul> category lists; index 1 is movies,
            # index 2 is scenes (selected by the incoming URL).
            index = 1
            if 'scenes' in params['url'].lower(): index = 2
            html = a.get_page(home_url)
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer('ul', 'nav fl'))
            item_list = []
            for item in soup.findAll('ul')[index].findAll({'a': True}):
                item_list.extend([{
                    # NOTE(review): hard-coded 'playporn' instead of `site`
                    # used everywhere else in this handler -- confirm this
                    # is intentional.
                    'site': 'playporn',
                    'mode': 'list',
                    'url': item.get('href'),
                    'content': '',
                    'title': item.contents[0].encode('UTF-8'),
                    'cover_url': a.image(image, image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build the search URL.
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            # 'goto' content: prompt for a page number and rewrite the URL.
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item_list = []
            # Listed items will be played when selected, so mutate params
            # into the 'play' defaults before templating the item dicts.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll('div', 'photo-thumb-image'):
                if not item.a.get('href') in false_positives:
                    _dict = xbmcdict.copy()
                    # Scene pages get a shorter default duration and are
                    # presented as episodes rather than movies.
                    if 'scenes' in params['url']:
                        _dict['duration'] = '2700'
                        _dict['content'] = 'episodes'
                    _dict['url'] = item.a.get('href')
                    _dict['title'] = item.a.get('title').encode('UTF-8')
                    _dict['tvshowtitle'] = _dict['title']
                    _dict['originaltitle'] = _dict['title']
                    _dict['cover_url'] = a.image(item.img.get('src'))
                    _dict['thumb_url'] = _dict['cover_url']
                    _dict['poster'] = _dict['cover_url']
                    _dict['sub_site'] = site

                    item_list.extend([_dict])
            # Pagination: previous / next / last links live in a
            # 'more_entries' div; 'last' becomes a goto-page entry.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', 'more_entries'))
            if soup:
                item = soup.find('a', 'previouspostslink')
                if item:
                    item_list.extend([{
                        'site':
                        site,
                        'mode':
                        'list',
                        'url':
                        item.get('href'),
                        'content':
                        params['content'],
                        'title':
                        a.language(30017, True),
                        'cover_url':
                        a.image('previous.png', image),
                        'backdrop_url':
                        a.art(),
                        'type':
                        3
                    }])
                item = soup.find('a', 'nextpostslink')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', 'last')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            # The player markup is hidden inside an HTML comment node within
            # the 'loopedSlider' div; extract it and unescape &lt;/&gt; so it
            # can be re-parsed as real markup.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'loopedSlider'}))
            soup = soup.find(text=lambda text: isinstance(text, Comment))
            if soup:
                soup = re.sub('&lt;', '<', soup.encode('utf-8'))
                soup = re.sub('&gt;', '>', soup)
                soup = BeautifulSoup(soup,
                                     parseOnlyThese=SoupStrainer(
                                         'div', 'video'))
                if soup:
                    item_list = []
                    xbmcdict = XBMCDict(0).update(params)
                    for item in soup.findAll('iframe'):
                        _dict = xbmcdict.copy()
                        # Strip the site's redirector prefix to get the
                        # hoster URL directly.
                        _dict['url'] = item.get('src').replace(
                            'http://playporn.to/stream/all/?file=',
                            '').encode('UTF-8')
                        # Host-specific URL rewrites to canonical page URLs.
                        if 'flashx.tv' in _dict['url'].lower():
                            item = re.search('hash=(.+?)&', _dict['url'])
                            if item:
                                _dict[
                                    'url'] = 'http://flashx.tv/video/' + item.group(
                                        1) + '/'
                        elif 'played.to' in _dict['url'].lower():
                            item = re.search('embed-([a-zA-Z0-9]+?)-.+?html',
                                             _dict['url'])
                            if item:
                                _dict[
                                    'url'] = 'http://played.to/' + item.group(
                                        1)
                        item_list.extend([_dict])
                    if item_list:
                        from playback import Playback
                        Playback().choose_sources(item_list)
                    else:
                        # 30904: localized "no playable source" alert.
                        a.alert(a.language(30904, True), sound=False)
                else:
                    a.alert(a.language(30904, True), sound=False)
            else:
                a.alert(a.language(30904, True), sound=False)
コード例 #22
0
    def __init__(self, params):
        """Dispatch a single plugin request for freeomovie.com.

        ``params['mode']`` selects the branch:
          * 'main'       -- build the static top-level menu
          * 'categories' -- scrape the category link list
          * 'list'       -- scrape one listing page, including search/goto
                            input handling and wp-pagenavi pagination
          * 'play'       -- scrape a video page (multi-part tabs, direct
                            links, iframes) and hand sources to Playback

        Fix over the original: the pagination fallback used a bare
        ``except: pass`` which also swallowed programming errors; it is
        narrowed to the exceptions the probe can actually raise while
        keeping the best-effort behaviour.

        NOTE(review): written for Python 2 / BeautifulSoup 3
        (``parseOnlyThese=SoupStrainer(...)``, ``.encode('UTF-8')`` on
        navigable strings). ``image`` is read from an enclosing scope that
        is not visible here -- presumably the module's default artwork;
        confirm against the module header.
        """
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        # Site id comes from the defining module so favourites/history
        # entries can route back to this handler.
        site = self.__module__
        mode = params['mode']

        # Hard-coded site endpoints.
        home_url = 'http://www.freeomovie.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'category/clips/'
        search_url = home_url + '/?s='
        # Category roots match the item selectors but must not be listed
        # as playable items.
        false_positives = [
            'http://www.freeomovie.com/category/full-movie/',
            'http://www.freeomovie.com/category/clips/'
        ]

        if mode == 'main':
            # Static top-level menu; a.language(...) resolves localized labels.
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            # Category links live in a 'multi-column-taxonomy-list' div.
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': 'multi-column-taxonomy-list'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        if item.get('href') not in false_positives:
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                item.get('href'),
                                'content':
                                '',
                                'title':
                                item.string.encode('UTF-8'),
                                'cover_url':
                                a.image(image, image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build the search URL.
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            # 'goto' content: prompt for a page number and rewrite the URL.
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'content'}))
            item_list = []
            # Listed items will be played when selected, so mutate params
            # into the 'play' defaults before templating the item dicts.
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'postbox'}):
                    if item:
                        if item.h2.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            # Clip pages get a shorter default duration and
                            # are presented as episodes rather than movies.
                            if scenes_url in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            _dict['url'] = item.h2.a.get('href')
                            _dict['title'] = item.h2.a.get('title').encode(
                                'UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            item_list.extend([_dict])
            # Pagination comes from the wp-pagenavi widget.
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'wp-pagenavi'}))
            last_item = False
            if soup:
                for item in soup.findAll('a', href=True):
                    if item:
                        if item.get('class') == 'previouspostslink':
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                item.get('href'),
                                'content':
                                params['content'],
                                'title':
                                a.language(30017, True),
                                'cover_url':
                                a.image('previous.png', image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])
                        if item.get('class') == 'nextpostslink':
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                item.get('href'),
                                'content':
                                params['content'],
                                'title':
                                a.language(30018, True),
                                'cover_url':
                                a.image('next.png', image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])
                        if item.get('class') == 'last':
                            last_item = item.get('href')
                if not last_item:
                    # Best-effort fallback: guess the last-page link from the
                    # link order when no explicit 'last' anchor exists. Only
                    # an empty/short link list can legitimately fail here, so
                    # catch just those errors instead of a bare except.
                    try:
                        if not soup.find('a', {'class': 'nextpostslink'}):
                            last_item = soup.findAll('a',
                                                     href=True)[-1].get('href')
                        else:
                            last_item = soup.findAll('a',
                                                     href=True)[-2].get('href')
                    except (IndexError, AttributeError):
                        pass
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            item_list = []
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'videosection'}))
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                # Multi-part sources: <li class="pg..."> tabs grouped by
                # class; each class change starts a new multi-part item.
                pages = soup.findAll('li', {'class': re.compile('pg.')})
                if pages:
                    old_li = pages[0].get('class')
                    _dict = xbmcdict.copy()
                    _dict['multi-part'] = True
                    parts = []
                    for li in pages:
                        if old_li != li.get('class'):
                            _dict['parts'] = parts
                            item_list.extend([_dict])
                            _dict = xbmcdict.copy()
                            _dict['multi-part'] = True
                            old_li = li.get('class')
                            parts = []
                        url = re.search('.+myurl=(.+)', li.a.get('href'),
                                        re.IGNORECASE)
                        if url:
                            url = url.group(1)
                            parts.extend([url])
                    # Flush the final group.
                    if parts:
                        _dict['parts'] = parts
                        item_list.extend([_dict])
                # Direct external link (skip the exoclick ad host).
                alink = soup.find('a', {'target': '_blank'})
                if alink:
                    alink = alink.get('href')
                    if 'main.exoclick.com' not in alink:
                        _dict = xbmcdict.copy()
                        _dict['url'] = alink
                        item_list.extend([_dict])
                # Embedded iframes (again skipping the ad host).
                iframes = soup.findAll('iframe', {'src': True})
                if iframes:
                    for iframe in iframes:
                        iframe = iframe.get('src')
                        if 'main.exoclick.com' not in iframe:
                            _dict = xbmcdict.copy()
                            _dict['url'] = iframe
                            item_list.extend([_dict])
                # Fallback: mirror tabs in the 'countrytabs' list carry the
                # source URL in a myURL[] query argument.
                if not item_list:
                    soup = BeautifulSoup(html,
                                         parseOnlyThese=SoupStrainer(
                                             'ul', {'id': 'countrytabs'}))
                    if soup:
                        xbmcdict = XBMCDict(0).update(params)
                        for index, items in enumerate(
                                soup.findAll('a', href=True)):
                            item = ''
                            if not items.get('id') == 'jpg':
                                item = items.get('href')
                                item = re.search('.*myURL\[\]=(.+)$', item,
                                                 re.DOTALL)
                                if item:
                                    item = re.sub('&tab=[0-9]+', '',
                                                  item.group(1))
                                if item:
                                    _dict = xbmcdict.copy()
                                    _dict['url'] = item
                                    _dict['count'] = index
                                    item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                # 30904: localized "no playable source" alert.
                a.alert(a.language(30904, True), sound=False)
コード例 #23
0
 def playback(self, data, name=False, context=False):
     """Wrap *data* in a Playback item and ask the item list to play it."""
     playable = Playback(self.plugin, data)
     self.items.play_item(playable, name, context)
コード例 #24
0
File: dazn.py  Project: kiliantv/repo
def playback(data):
    """Build a Playback instance from *data* and queue it for playing."""
    # Imported lazily so the playback module is only loaded when needed.
    from playback import Playback
    item = Playback(data)
    items.play_item(item)
コード例 #25
0
    def add_note(self, note, input_event):
        """Insert or replace a note symbol at the current cursor position.

        Appends a new note/rest symbol when the cursor sits at the end of the
        score, or replaces the symbol under the cursor otherwise.  A backtick
        (`) triggers the break-apart event instead of inserting anything.

        :param note: single-character note name typed by the user (case
            insensitive; "R" means rest, "`" means break apart)
        :param input_event: truthy when triggered by a keyboard event (the
            widget has already received the raw keystroke, so one extra
            trailing character must be stripped)

        NOTE(review): assumes self is a tkinter Text subclass with helpers
        break_note/save_last_text/cancel_event — confirm against class def.
        """
        # find insertion index of the note ("line.column" tkinter index)
        insert_index = self.index(tkinter.INSERT)

        # if the inputted key is a `, then run the break apart event
        if note == "`":
            self.break_note(insert_index)
            return

        # check if at end of the score by comparing the cursor column to the
        # total text length
        # NOTE(review): flagged "PROBLEM HERE" by the original author —
        # this comparison only holds for single-line content, since the
        # column is compared against the length of the WHOLE text; verify.
        if int(insert_index.split(".")[1]) == len(self.get(1.0, "end-1c")):  # PROBLEM HERE
            # if it was a keyboard event, there was an attempt to add text to the text area, remove 2 characters
            # if not keyboard, remove one character from the end (just whitespace)
            if input_event:
                string = self.get(1.0, "end-2c")
            else:
                string = self.get(1.0, "end-1c")

            # convert to upper case for easy keyboard parsing
            current_note = note.upper()

            # nuke the current text to make sure nothing i dont want added is added
            self.delete("1.0", tkinter.END)
            self.insert(tkinter.END, string)

            # check validity of octave 2 (only high A is allowed; ord("A")==65)
            if Variables.octave == 2 and ord(current_note) > 65:
                return

            # check if current note fits in the measure
            if Variables.current_measure_length + (1.0 / float(Variables.note_length)) > Variables.get_measure_length():
                messagebox.showerror("Invalid Configuration", "ERROR: This note is too long to fit in this measure!")
                return

            # check if the note exists in the naturals table (cross-reference keyboard input to see if valid input)
            if current_note in note_lookup.naturals:
                # check if rest, then add the rest symbol from the lookup class if so
                if current_note == "R":
                    append = LookupNote.get_rest(Variables.note_length)

                # if not a rest, append the note from the symbol generation in the lookup class
                else:
                    append = LookupNote.get_note(current_note, Variables.octave, Variables.note_length,
                                                 Variables.accidental, Variables.key_sig)
                # insert note
                self.insert(tkinter.END, append)
            else:
                # not a recognised natural note -> ignore the keystroke
                return

            # play tone so the user knows what they just added
            Playback.play_tone(current_note, Variables.octave, Variables.accidental)

            # update the current measure length
            Variables.current_measure_length += 1.0 / float(Variables.note_length)

            # compare to see if the measure length was just finished, if so add a measure line
            # (isclose avoids float-accumulation errors in the running total)
            if math.isclose(Variables.get_measure_length(), Variables.current_measure_length):
                Variables.current_measure_length = 0.0
                self.insert(tkinter.END, "!=")

            # save the new text so it can be compared again if needed on next input event
            self.save_last_text()
        else:  # note is not at the end of the score
            # get the current character at the insertion index
            split = insert_index.split(".")
            next_index = split[0] + "." + str(int(split[1]) + 1)
            character = self.get(insert_index, next_index)

            # if the note is one that can be replaced
            if LookupNote.replaceable(character):
                # split the score text around the symbol being replaced
                first_half = self.get(1.0, split[0] + "." + str(int(split[1]) - 1))
                second_half = self.get(split[0] + "." + str(int(split[1]) + 2), "end-1c")

                # generate new note from the lookup note class using current settings
                # (length is inherited from the symbol being replaced)
                new_note = LookupNote.get_note(note.upper(), Variables.octave, LookupNote.get_note_length(character),
                                               Variables.accidental, Variables.key_sig)

                # if there is an accidental symbol, remove it
                # NOTE(review): 208..254 presumably is the accidental glyph
                # range in the score font; raises IndexError if first_half is
                # empty — confirm the cursor can never sit at column <= 1 here
                if 208 <= ord(first_half[-1]) <= 254:
                    first_half = first_half[0:len(first_half) - 1]

                # delete the old text, insert the new generated text with the new symbol in the insertion index
                self.delete(1.0, tkinter.END)
                self.insert(tkinter.END, first_half + new_note + second_half)

                # play the tone so they know what they inputted
                Playback.play_tone(note.upper(), Variables.octave, Variables.accidental)

                # save the text for future comparisons
                self.save_last_text()
            else:  # cancel the event if replacement is impossible
                self.cancel_event()
コード例 #26
0
    def __init__(self, params):
        """Dispatch a Kodi addon request for the urbanhentai.com scraper.

        Depending on ``params['mode']`` this builds the main menu, the
        category list, a paged video listing, or resolves a video URL and
        starts playback.  Python 2 code (urllib2, BeautifulSoup 3).

        :param params: dict of request parameters; must contain 'mode' and,
            for most modes, 'url'
        """
        import re
        import urllib2
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        # site endpoints scraped by this handler
        base_url = 'http://urbanhentai.com'
        home_url = base_url
        search_url = base_url + '/?s='
        # hrefs to ignore when walking category links
        false_positives = ['#']

        if mode == 'main':
            # static top-level menu: all videos, categories, search
            # NOTE(review): `image` is a module-level name not visible in
            # this file chunk — presumably the addon's icon set; verify.
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            # append favourites/history and extended entries shared by all sites
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            # scrape the genre sub-menu out of the site's navigation bar
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'li', {'id': 'menu-item-4538'}))
            item_list = []
            if soup:
                genre_list = soup.find('ul', {'class': 'sub-menu'})
                if genre_list:
                    # NOTE(review): iterates soup, not genre_list — so links
                    # outside the sub-menu are included too; confirm intended
                    for item in soup.findAll('a'):
                        if item.get('href') not in false_positives:
                            item_list.extend([{
                                'site':
                                site,
                                'mode':
                                'list',
                                'url':
                                item.get('href').replace(' ', '+'),
                                'content':
                                '',
                                'title':
                                item.string.encode('UTF-8'),
                                'cover_url':
                                a.image(image, image),
                                'backdrop_url':
                                a.art(),
                                'type':
                                3
                            }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            # 'search' content: prompt the user and build a search URL
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)
            # 'goto' content: prompt for a page number and rewrite the URL
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    # no page marker in the URL -> effectively unbounded
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            # scrape one page of video posts
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': re.compile('loop-content.*')}))
            item_list = []
            # every listed item plays via mode 'play'
            params['mode'] = 'play'
            params['content'] = 'episodes'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '1500'
            params['sub_site'] = site
            if soup:
                addondict = AddonDict(0).update(params)
                for item in soup.findAll('div',
                                         {'id': re.compile('post-[0-9]+')}):
                    _dict = addondict.copy()
                    clip_link = item.find('a', {'class': 'clip-link'})
                    if clip_link:
                        url = clip_link.get('href')
                        if not url.startswith('http://'):
                            url = base_url + url
                        _dict['url'] = url
                        # prefer the anchor's title attribute; fall back to
                        # the entry-title heading when it is missing
                        try:
                            _dict['title'] = clip_link.get('title').encode(
                                'UTF-8')
                        except:
                            data = item.find('h2', {'class': 'entry-title'})
                            if data:
                                _dict['title'] = str(data.a.contents[0])
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        # thumbnail: normalise protocol-relative '//' URLs
                        img = item.find('img')
                        if img:
                            img = img.get('src')
                            if img.startswith('//'):
                                img = 'http:' + img
                        else:
                            img = ''
                        _dict['cover_url'] = a.image(img)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        item_list.extend([_dict])
                # pagination controls (wp-pagenavi): previous / next / goto
                pages = BeautifulSoup(html,
                                      parseOnlyThese=SoupStrainer(
                                          'div', {'class': 'wp-pagenavi'}))
                if pages:
                    previouspage = pages.find('a',
                                              {'class': 'previouspostslink'})
                    nextpage = pages.find('a', {'class': 'nextpostslink'})
                    lastpage = pages.find('a', {'class': 'last'})

                    if previouspage:
                        previouspage = previouspage.get('href').replace(
                            ' ', '+')
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            previouspage,
                            'content':
                            params['content'],
                            'title':
                            a.language(30017, True),
                            'cover_url':
                            a.image('previous.png', image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])
                    if nextpage:
                        nextpage = nextpage.get('href').replace(' ', '+')
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            nextpage,
                            'content':
                            params['content'],
                            'title':
                            a.language(30018, True),
                            'cover_url':
                            a.image('next.png', image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])
                    if lastpage:
                        lastpage = lastpage.get('href').replace(' ', '+')
                        item_list.extend([{
                            'site':
                            site,
                            'mode':
                            'list',
                            'url':
                            lastpage,
                            'content':
                            'goto',
                            'title':
                            a.language(30019, True),
                            'cover_url':
                            a.image('goto.png', image),
                            'backdrop_url':
                            a.art(),
                            'type':
                            3
                        }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            # resolve the actual media URL from the video page's player config
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': re.compile('entry-content.*')}))
            item_list = []
            if soup:
                # the player embeds the stream as  file: '<url>'
                item = re.search('file\s*:\s*[\'"](.+?)[\'"]',
                                 str(soup.contents[0]))
                if item:
                    item = item.group(1)
                    if base_url in item:
                        # site-hosted URLs redirect to the real stream:
                        # follow the redirect with browser-like headers
                        # NOTE(review): each addheaders assignment REPLACES
                        # the previous one, so only the Accept header is in
                        # effect — presumably unintended; verify
                        try:
                            opener = urllib2.build_opener()
                            opener.addheaders = [
                                ('User-agent',
                                 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                                 'AppleWebKit/537.36 (KHTML, like Gecko) '
                                 'Chrome/45.0.2454.101 Safari/537.36')
                            ]
                            opener.addheaders = [('Referer', params['url'])]
                            opener.addheaders = [
                                ('Accept', 'text/html,application/xhtml+xml,'
                                 'application/xml;q=0.9,image/webp,*/*;q=0.8')
                            ]
                            urllib2.install_opener(opener)
                            item = urllib2.urlopen(item).geturl()
                        except urllib2.HTTPError as e:
                            # an HTTP error may still reveal the final URL
                            if item != e.geturl():
                                item = e.geturl()
                            else:
                                item = None
                    if item:
                        addondict = AddonDict(0).update(params)
                        _dict = addondict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                # nothing resolved: notify the user (silently, no sound)
                a.alert(a.language(30904, True), sound=False)
コード例 #27
0
                print >>logfile, "%s #%i failed."%(mouse,tr+1);logfile.flush()

elif mode == 'single':
    if actions in ['track','both']:
        print >>logfile, "Tracking %s #%i"%(mouse,n);logfile.flush()
        mt = MouseTracker(mouse=mouse, n=n, data_dir=data_dir, diff_thresh=diff_thresh, resample=resample_t, point_mode=point_finding_mode)
        mt.run(show=show, wait=ms_bt_frames, start_pos=start_position)
    if actions in ['mark','both']:
        print >>logfile, "Marking %s #%i"%(mouse,n);logfile.flush()
        m = Marker(mouse=mouse, n=n, data_dir=data_dir, mark_mode=mark_mode)
        mres = m.run(resample=resample_m, start_time=start_time)
        if not mres[0]:
            print >>logfile, "WARNING: Marking for %s #%i declared a warning: %s"%(mouse,n,mres[1]);logfile.flush()
    if actions == 'play':
        print >>logfile, "Playing %s #%i"%(mouse,n);logfile.flush()
        pb = Playback(ymaze_path=mouse, ymaze_n=n, data_dir=data_dir)
        pb.play(draw=show_tracking)
    
elif mode == 'collect':
    mice = [m for m in os.listdir(data_dir) if exclude_word not in m.lower() and m[0]!='.' and 'summary' not in m]

    rows = []
    for mouse in mice:
        print >>logfile, mouse;logfile.flush()
        fh = FileHandler(data_dir, mouse, n=1)
        for tr in xrange(fh.get_n_trials_wbehav()):
            fhm = FileHandler(data_dir, mouse, n=tr+1)
            data = np.load(fhm.make_path('behaviour.npz'))
            dic = dict(mouse=mouse, n=tr+1, score=data['score'], time_to_correct=data['time_to_correct'], distance=data['distance'], start_time=data['start_time'])
            rows.append(dic)