Example no. 1
def get_stats(player):
	unirest.timeout(3)
	try:
		response = unirest.get(player['URL']).body
	except Exception as e:
		print 'ERROR: %s for url: %s' % (str(e), player['URL'])
		player['SUCCESS'] = False
		return player
	player['SUCCESS'] = True
	soup = BeautifulSoup(response)
	player['NAME'] = soup.find_all('h1')[1].contents[0].encode('utf-8')
	results = soup.find_all('tr', {'class':'oddrow'})
	for result in results:
		if result.contents[0].contents[0] == '2014 Regular Season':
			wr_stats = result.contents[1:] 
			try:	
				player['YDS'] = int(wr_stats[2].contents[0].replace(',', ''))
			except Exception as e:
				player['YDS'] = 0
			try:
				player['TD'] = int(wr_stats[5].contents[0].replace(',', ''))
			except Exception as e:
				player['TD'] = 0
			return player
	player['YDS'] = 0
	player['TD'] = 0
	return player
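A note on the pattern above: unirest.timeout() sets a module-wide default rather than a per-request option, so the most recent value applies to every subsequent call. A minimal sketch of that behaviour, using placeholder httpbin URLs:

import unirest

unirest.timeout(3)   # every request from here on gets a 3-second limit
fast = unirest.get("http://httpbin.org/delay/1")

unirest.timeout(30)  # raise the shared limit before hitting a slower endpoint
slow = unirest.get("http://httpbin.org/delay/10")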
Example no. 2
def PutData(isbnurls):
    """ data """
    spidername = 'ShaishufangAmazon'
    cnt = 0
    for url in isbnurls:
        print cnt, '-->', url
        cnt += 1
        unirest.timeout(180)
        response = unirest.get(url, headers={"Accept":"application/json"}) # handle url = baseurl + isbn
        try:
            #bookdt
            bookdt = response.body['isbn']
            bookdt['spider'] = spidername
            #data style
            datadict = {}
            datadict['datas'] = [bookdt]
            #put datadict
            unirest.timeout(180)
            resdata = unirest.put(
                            "http://192.168.100.3:5000/data",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(datadict)
                         )
        except Exception:
            pass  # skip URLs whose response lacks usable book data
        if cnt % 80 == 0:
            time.sleep(3)
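PutData throttles itself by sleeping for three seconds after every 80 URLs. The same idea can be factored into a reusable generator; a sketch with a hypothetical name (throttled is not part of unirest):

import time

def throttled(iterable, every=80, pause=3):
    # Yield items, sleeping `pause` seconds after each `every` items,
    # mirroring the (cnt % 80) == 0 check in PutData above.
    for n, item in enumerate(iterable, 1):
        yield item
        if n % every == 0:
            time.sleep(pause)

# usage: for url in throttled(isbnurls): ...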
Example no. 3
    def __init__(self, url=None):
        if url:
            # retrieve with post method, put for create, get for read, delete for delete
            # unvisitedurls http://localhost:5000/unvisitedurls?start=0&offset=10&spider=douban
            unirest.timeout(180)
            req = unirest.post(url, headers={"Accept":"application/json"})
            self.start_urls = [data['url'] for data in req.body['data']]
            self.name = url[url.find('spider=')+7:]

            self.visitedurldict = OrderedDict()
            self.datadict       = OrderedDict()
            self.filedict       = OrderedDict()
            self.deadurldict    = OrderedDict()

            self.visitedurldict['urls'] = []
            self.datadict['datas']      = []
            self.filedict['files']      = []
            self.deadurldict['urls']    = []

            rules = (
                Rule(sle(allow=("http://book.douban.com/isbn/\d+$")), callback="parse", follow=True),
                Rule(sle(allow=("http://book.douban.com/subject/\d+$")), callback="parse", follow=True),
            )
        # def __del__(self) work
        dispatcher.connect(self.spider_closed, signals.spider_closed)
Example no. 4
def get_stats(player):
	unirest.timeout(3)
	try:
		response = unirest.get(player['URL']).body
	except Exception as e:
		print 'ERROR: %s for url: %s' % (str(e), player['URL'])
		player['SUCCESS'] = False
		return player
	player['SUCCESS'] = True
	soup = BeautifulSoup(response)
	player['NAME'] = soup.find_all('h1')[1].contents[0].encode('utf-8')
	results = soup.find_all('tr', {'class':'oddrow'})
	for result in results:
		if result.contents[0].contents[0] == '2014 Regular Season':
			qb_stats = result.contents[1:] 
			try:	
				player['YDS'] = int(qb_stats[2].contents[0].replace(',', ''))
			except Exception as e:
				player['YDS'] = 0
			try:
				player['TD'] = int(qb_stats[5].contents[0].replace(',', ''))
			except Exception as e:
				player['TD'] = 0
			return player
	player['YDS'] = 0
	player['TD'] = 0
	return player
Example no. 5
def get_corpus_score(close_sentences):
    closest_sentences = []
    # print close_sentences
    for line in close_sentences:
        unirest.timeout(10)
        try:
            response = unirest.get("https://twinword-sentiment-analysis.p.mashape.com/analyze/?text=%s" % line,
            headers={
            "X-Mashape-Key": "ur8eDH4fVCmshtOozaz1zoWSjS79p1U8IGljsnA2aJAoTuh4Fc",
            "Accept": "application/json"
            }
            )
        except Exception:
            # print "exception thrown"
            continue
        if response.code != 200:
            continue
        t = response.body
        score = t['score']
        if 0.05 < score <= 0.5:
            score = 1
        elif 0.5 < score <= 2.0:
            score = 2
        elif score > 2.0:
            score = 3
        elif -0.05 <= score <= 0.05:
            score = 0
        elif -0.5 <= score < -0.05:
            score = -1
        elif -2.0 < score < -0.5:
            score = -2
        else:
            score = -3
        closest_sentences.append((score, line))
    return closest_sentences
Example no. 6
    def test_cities_correctValues(self, NumOfCities=5):
        #------------------------------------------------
        # NumOfCities: How many cities you want to test?
        #------------------------------------------------
        for counter in range(0,NumOfCities):
            # Generate a random index
            picked_index = randint(0, len(self.cities) - 1)  # randint is inclusive at both ends
            picked_city = self.cities[picked_index]

            # processing the value of city
            if (picked_city != " "):
                # - Remove spaces at beginning or end of the string
                picked_city.strip()
                # - Replace spaces to +
                picked_city = picked_city.replace(" ", "+")
            else:
                logging.warning("The value of city is empty")
            # End if
            
            unirest.timeout(5)  # set the timeout before issuing the request
            http_response = self.http_request(picked_city)
            logging.debug(http_response.code)
            logging.debug(http_response.headers)
            logging.debug(http_response.body)
            
            if (http_response.code == 200):
                test_result = self.parsing_http_body(http_body=http_response.body)
                if (test_result):
                    logging.info("PositiveTest: Test for " + picked_city + " has PASSED\n")
                else:
                    logging.info("PositiveTest: Test for " + picked_city + " has FAILED\n")
                # End if
            else:
                logging.info("PositiveTest: Test for " + picked_city + " has FAILED\n")
Example no. 7
    def __init__(self, url=None):

        #print "here i am"
        if url:
            # retrieve with post method, put for create, get for read, delete for delete
            # unvisitedurls http://localhost:5000/unvisitedurls?start=0&offset=10&spider=6w
            unirest.timeout(180)
            req = unirest.post(url, headers={"Accept":"application/json"})
            self.start_urls = [data['url'] for data in req.body['data']]
            self.name = url[url.find('spider=')+7:]

            self.visitedurldict = OrderedDict()
            self.datadict       = OrderedDict()
            self.filedict       = OrderedDict()
            self.deadurldict    = OrderedDict()

            self.visitedurldict['urls'] = []
            self.datadict['datas']      = []
            self.filedict['files']      = []
            self.deadurldict['urls']    = []

            rules = (
                Rule(sle(allow=("http://book.douban.com/isbn/\d+$")), callback="parse", follow=True),
                Rule(sle(allow=("http://book.douban.com/subject/\d+$")), callback="parse", follow=True),
            )
        # def __del__(self) work
        dispatcher.connect(self.spider_closed, signals.spider_closed)
Example no. 8
def callDownloadApk():
    '''
    URL trimming:
    url = request._get_body_string().split('=')
    requestBody = url[1]
    :return:
    '''

    #Setting timeout as unirest calls get timed out because analysis takes time
    unirest.timeout(600000)
    requestBody = request._get_body_string()

    #Config reading
    if platform.system().lower() == "windows":
        db_config_file = os.path.join(os.path.dirname(sys.executable), 'androbugs-db.cfg')
    else:
        db_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'androbugs-db.cfg')

    if not os.path.isfile(db_config_file):
        print("[ERROR] AndroBugs Framework DB config file not found: " + db_config_file)
        traceback.print_exc()

    configParser = SafeConfigParser()
    configParser.read(db_config_file)

    downloadPath=configParser.get('General_Config', 'DownloadSciptPath')

    #Calling the download apk method
    cmd = 'python '+downloadPath+'download.py ' + requestBody
    print "cmd is: "+cmd
    os.system(cmd)
    #responseBase = unirest.post("http://localhost:8080/analyseApk", headers={ "Accept": "application/json" },
                                       #body={requestBody})
    return callAnalyseApk(requestBody+".apk")
Example no. 9
def putUnvisitedUrls(data):
    url = 'http://127.0.0.1:5000/unvisitedurls'
    unirest.timeout(180)  # 180 s timeout, matching the Scrapy download middlewares
    res = unirest.put(url, headers=Headers, params=json.dumps(data))

    if res.body['code'] != 200:
        return False

    return len(res.body['data'])
Example no. 10
def putDeadUrls(data):
    url = BaseUrl + 'deadurls'
    unirest.timeout(180)  # 180 s timeout, matching the Scrapy download middlewares
    res = unirest.put(url, headers=Headers, params=json.dumps(data))

    if res.body['code'] != 200:
        return False

    return True
Example no. 11
 def run(self):
     unirest.timeout(100)
     GetRequestTest.start_time = time.time()
     for i in range(self.number_of_requests):
         url = "http://0.0.0.0:5000/predict"
         body = "test" + str(i)
         print "Request: ", url, body
         unirest.get(url,
                     params={"text": body},
                     callback=GetRequestTest.callback_function)
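The callback referenced above is not shown. A minimal hypothetical version (the start_time attribute mirrors the one already used by GetRequestTest, but the body is an assumption):

     @staticmethod
     def callback_function(response):
         # Hypothetical: report status and latency relative to the shared start time
         print "code=%d after %.2fs" % (response.code,
                                        time.time() - GetRequestTest.start_time)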
Example no. 12
    def test_timeout(self):
        unirest.timeout(3)
        response = unirest.get('http://httpbin.org/delay/1')
        self.assertEqual(response.code, 200)

        unirest.timeout(1)
        try:
            response = unirest.get('http://httpbin.org/delay/1')
            self.fail("The timeout didn't work")
        except AssertionError:
            raise  # don't let the handler below swallow self.fail()
        except Exception:
            pass  # expected: the request timed out
Example no. 13
    def test_timeout(self):
        unirest.timeout(3)
        response = unirest.get("http://httpbin.org/delay/1")
        self.assertEqual(response.code, 200)

        unirest.timeout(1)
        try:
            response = unirest.get("http://httpbin.org/delay/3")
            self.fail("The timeout didn't work")
        except AssertionError:
            raise  # don't let the handler below swallow self.fail()
        except Exception:
            pass  # expected: the request timed out
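These two tests rely on an exception handler to absorb the timeout error, and the re-raise added above keeps self.fail() working. On Python 2.7+ the negative case reads more directly with assertRaises; a sketch, assuming unirest signals a timeout by raising:

    def test_timeout_raises(self):
        unirest.timeout(1)
        with self.assertRaises(Exception):  # assumed: unirest raises on timeout
            unirest.get("http://httpbin.org/delay/3")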
Example no. 14
def get_header(url, OutQueue1):
    #sets unirest timeout to 10 seconds
    unirest.timeout(10)
    #sets headers
    headers = {"Accept": "application/json"}
    # 'url' is actually a queue of URLs; take the next one
    SingleUrl = url.get()
    # call get service with headers and params and store result
    result = unirest.get(SingleUrl, headers=headers)
    #put results in the output queue
    OutQueue1.put(result.headers)
    #signals task done
    url.task_done()
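get_header is written to be driven from queues, so it drops straight into a worker thread. A hypothetical harness (queue names and the test URL are placeholders):

import threading
from Queue import Queue  # 'queue' on Python 3

url_queue, header_queue = Queue(), Queue()
url_queue.put("http://httpbin.org/get")

worker = threading.Thread(target=get_header, args=(url_queue, header_queue))
worker.daemon = True
worker.start()

url_queue.join()          # get_header calls url.task_done() when finished
print header_queue.get()  # headers collected by the worker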
Example no. 15
def listen_commit_status(client, user_key, queue):
    try:
        url = "http://" + etcd_member[0] + ":4001/v2/keys" + user_key + "?wait=true"
        unirest.timeout(20)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        # TODO: should be exported to new thread
        print "listen_commit_status: url: " + url + " New Key: " + new_item['key'] + " New Value: " + new_item['value']
        ret_val = new_item['value']
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_commit_status: Timeout exception: " + url
        ret_val = "0"
    return ret_val
Example no. 16
def generate_wordcloud(word_list):

    mashape_key = os.environ.get('XMASHAPEKEY')
    if mashape_key is not None:
        Unirest.timeout(20)
        print >>sys.stderr, str(timezone.now()) + " Start Word Cloud Generation: " + word_list
        response = Unirest.post("https://gatheringpoint-word-cloud-maker.p.mashape.com/index.php",
                                headers={"X-Mashape-Key": mashape_key},
                                params={"config": "n/a", "height": 500, "textblock": word_list, "width": 800}
                                )
        print >>sys.stderr, str(timezone.now()) + " Finish Word Cloud Generation: " + word_list
        if response.code == 200:
            return save_url(response.body['url'], 'media/wordcloud_images')
    return None
Example no. 17
def listen_ready_ack(client, user_key, platform, queue):
    cloud_hoster_local = cloud_hoster
    try:
        url = "http://" + etcd_member[0] + ":4001/v2/keys" + user_key + "?wait=true"
        print "listen_ready_ack: wait url: " + url
        unirest.timeout(20)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        # TODO: should be exported to new thread
        print "listen_ready_ack: platform: " + platform + " New Key: " + new_item['key']
        cloud_hoster_local[platform][0] = True
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_ready_ack: Timeout exception platform: " + platform
        cloud_hoster_local[platform][0] = False
Example no. 18
def retrieveUnvisitedUrls(start, offset, spider):
    url = BaseUrl + 'unvisitedurls?start=' + str(start) + '&offset=' + str(offset) + '&spider=' + spider
    unirest.timeout(180)  # 180 s timeout, matching the Scrapy download middlewares
    res = unirest.post(url, headers=Headers)
    if res.body['code'] != 200:
        return []

    if len(res.body['data']) == 0:
        return []

    urls = []
    for item in res.body['data']:
        urls.append(str(item['url']))

    return urls
Example no. 19
def listen_commit_status(client, user_key, queue):
    try:
        url = "http://" + etcd_member[
            0] + ":4001/v2/keys" + user_key + "?wait=true"
        unirest.timeout(20)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        # TODO: should be exported to new thread
        print "listen_commit_status: url: " + url + " New Key: " + new_item[
            'key'] + " New Value: " + new_item['value']
        ret_val = new_item['value']
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_commit_status: Timeout exception: " + url
        ret_val = "0"
    return ret_val
Example no. 20
    def delete_media_by_id(self,
                           id):
        """Does a DELETE request to /media/{id}.

        Delete media results. It returns the status of the operation.

        Args:
            id (string): The id of the media.

        Returns:
            MediaByIdResponse: Response from the API. 

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        # The base uri for api requests
        query_builder = Configuration.BASE_URI
 
        # Prepare query string for API call
        query_builder += "/media/{id}"

        # Process optional template parameters
        query_builder = APIHelper.append_url_with_template_parameters(query_builder, { 
            "id": id
        })

        # Validate and preprocess url
        query_url = APIHelper.clean_url(query_builder)

        # Prepare headers and append custom auth authorization
        headers = {}  # assumption: request headers start empty in this snippet
        CustomAuthUtility.appendCustomAuthParams(headers)

        # Prepare and invoke the API call request to fetch the response
        unirest.timeout(20)
        response = unirest.delete(query_url, headers=headers)

        # Error handling using HTTP status codes
        if response.code < 200 or response.code > 206:  # 200 = HTTP OK
            print response.body
            raise APIException("HTTP Response Not OK", response.code, response.body) 
    
        # Try to cast response to desired type
        if isinstance(response.body, dict):
            print "Media ID Deleted"
Example no. 21
def listen_ready_ack(client, user_key, platform, queue):
    cloud_hoster_local = cloud_hoster
    try:
        url = "http://" + etcd_member[
            0] + ":4001/v2/keys" + user_key + "?wait=true"
        print "listen_ready_ack: wait url: " + url
        unirest.timeout(20)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        # TODO: should be exported to new thread
        print "listen_ready_ack: platform: " + platform + " New Key: " + new_item[
            'key']
        cloud_hoster_local[platform][0] = True
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_ready_ack: Timeout exception platform: " + platform
        cloud_hoster_local[platform][0] = False
Example no. 22
    def create_media(self,
                     source=None):
        """Does a POST request to /media.

        Create a new media object to be processed.

        Args:
            source (string, optional): The source URL of the media.

        Returns:
            MediaResponse: Response from the API. 

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        # The base uri for api requests
        query_builder = Configuration.BASE_URI
 
        # Prepare query string for API call
        query_builder += "/media"

        # Process optional query parameters
        query_parameters = {
            "source": source,
            "timeout": Configuration.api_timeout
        }
        query_builder = APIHelper.append_url_with_query_parameters(query_builder, query_parameters)

        # Validate and preprocess url
        query_url = APIHelper.clean_url(query_builder)

        # Prepare headers and append custom auth authorization
        headers = {}  # assumption: request headers start empty in this snippet
        CustomAuthUtility.appendCustomAuthParams(headers)

        def post_callback(response):
            self.delete_media_by_id(MediaByIdResponse(**response.body).id)
            print MediaByIdResponse(**response.body).frames

        # Prepare and invoke the API call request to fetch the response
        unirest.timeout(20)
        response = unirest.post(query_url, headers=headers, callback=post_callback)
Example no. 23
def get_stats(player):
	unirest.timeout(3)
	try:
		response = unirest.get(player['URL']).body
	except Exception as e:
		print 'ERROR: %s for url: %s' % (str(e), player['URL'])
		player['SUCCESS'] = False
		return player
	soup = BeautifulSoup(response)

	player['NAME'] = soup.find_all('h1')[1].contents[0].encode('utf-8')
	results = soup.find_all('tr', {'class': 'evenrow'}) + soup.find_all('tr', {'class': 'oddrow'})

	count = 0
	i = 0
	while (count < 2 and i < len(results)):
		if results[i].contents[0].contents[0] == '\'14-\'15':
			count += 1
		i += 1
	i -= 1

	try:
		stats = results[i].contents
	except IndexError as e:
		player['PTS'] = 0
		player['AST'] = 0
		player['REB'] = 0
		return player
	
	
	try:
		player['PTS'] = int(stats[16].contents[0].replace(',', ''))
	except Exception as e:
		player['PTS'] = 0
	try:
		player['AST'] = int(stats[11].contents[0].replace(',', ''))
	except Exception as e:
		player['AST'] = 0
	try:
		player['REB'] = int(stats[10].contents[0].replace(',', ''))
	except Exception as e:
		player['REB'] = 0
	player['SUCCESS'] = True
	return player
Example no. 24
def get_stats(player):
    unirest.timeout(3)
    try:
        response = unirest.get(player['URL']).body
    except Exception as e:
        print 'ERROR: %s for url: %s' % (str(e), player['URL'])
        player['SUCCESS'] = False
        return player
    soup = BeautifulSoup(response)

    player['NAME'] = soup.find_all('h1')[1].contents[0].encode('utf-8')
    results = soup.find_all('tr', {'class': 'evenrow'}) + soup.find_all(
        'tr', {'class': 'oddrow'})

    count = 0
    i = 0
    while (count < 2 and i < len(results)):
        if results[i].contents[0].contents[0] == '\'14-\'15':
            count += 1
        i += 1
    i -= 1

    try:
        stats = results[i].contents
    except IndexError as e:
        player['PTS'] = 0
        player['AST'] = 0
        player['REB'] = 0
        return player

    try:
        player['PTS'] = int(stats[16].contents[0].replace(',', ''))
    except Exception as e:
        player['PTS'] = 0
    try:
        player['AST'] = int(stats[11].contents[0].replace(',', ''))
    except Exception as e:
        player['AST'] = 0
    try:
        player['REB'] = int(stats[10].contents[0].replace(',', ''))
    except Exception as e:
        player['REB'] = 0
    player['SUCCESS'] = True
    return player
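A hypothetical driver for the scraper above; the URL is a placeholder for an ESPN-style player page and would need to point at a real profile:

players = [{'URL': 'http://espn.go.com/nba/player/_/id/0000/some-player'}]
for p in players:
    p = get_stats(p)
    if p.get('SUCCESS'):
        print '%s: %d PTS / %d AST / %d REB' % (
            p['NAME'], p['PTS'], p['AST'], p['REB'])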
Example no. 25
def listen_ack_written_data(client, user_key, platform, queue):
    # counter = 0
    receive_result = {platform: [False]}
    try:
        url = "http://" + etcd_member[0] + ":4001/v2/keys" + user_key + "?wait=true"
        print "listen_ack_written_data: wait url: " + url
        unirest.timeout(20)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        # TODO: should be exported to new thread
        print "listen_ack_written_data: platorm: " + platform + " New Key: " + new_item['key'] + " New Value: " + new_item['value']
        if new_item['value'] == "3":
            receive_result[platform][0] = True
        else:
            receive_result[platform][0] = False
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_ack_written_data: Timeout exception platform: " + platform
        receive_result[platform][0] = False
Example no. 26
def listen_ack_etcd(client, user_key, platform, queue, ack_num):
    print "listen_ack_etcd: listen for ack_num: " + ack_num + " from platform: " + platform
    # counter = 0
    receive_result = {platform: [False]}
    
    try:
        url = "http://" + etcd_member[0] + ":4001/v2/keys" + user_key + "?wait=true"
        print "listen_ack_etcd: wait url: " + url
        unirest.timeout(10000)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        print "listen_ack_etcd: platorm: " + platform + " New Key: " + new_item['key'] + " New Value: " + new_item['value']
        if new_item['value'] == ack_num:
            receive_result[platform][0] = True
        else:
            receive_result[platform][0] = False
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_ack_etcd: Timeout exception platform: " + platform
        receive_result[platform][0] = False
Example no. 27
 def test_cities_incorrectValues(self):
     cities_with_incorrectValues = ["",  "++$$$", "#$#@$2er"]
     for city in cities_with_incorrectValues:            
         unirest.timeout(5)  # set the timeout before issuing the request
         http_response = self.http_request(city)
         logging.debug(http_response.code)
         logging.debug(http_response.headers)
         logging.debug(http_response.body)
         
         if (http_response.code == 200):
             test_result = self.parsing_http_body(http_body=http_response.body)
             if (not test_result):
                 logging.info("NegativeTest: Test for " + city + " has returned an error.\n"+
                              "So, Test for " + city + " has PASSED\n")
             else:
                 logging.info("NegativeTest: Test for " + city + " has returned no error.\n"+
                               "So, Test for " + city + " has FAILED\n")
             # End if
         else:
             logging.info("NegativeTest: Test for " + city + " has FAILED\n")
Example no. 28
def listen_ack_written_data(client, user_key, platform, queue):
    # counter = 0
    receive_result = {platform: [False]}
    try:
        url = "http://" + etcd_member[
            0] + ":4001/v2/keys" + user_key + "?wait=true"
        print "listen_ack_written_data: wait url: " + url
        unirest.timeout(20)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        # TODO: should be exported to new thread
        print "listen_ack_written_data: platorm: " + platform + " New Key: " + new_item[
            'key'] + " New Value: " + new_item['value']
        if new_item['value'] == "3":
            receive_result[platform][0] = True
        else:
            receive_result[platform][0] = False
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_ack_written_data: Timeout exception platform: " + platform
        receive_result[platform][0] = False
Example no. 29
def generate_wordcloud(word_list):

    mashape_key = os.environ.get('XMASHAPEKEY')
    if mashape_key is not None:
        Unirest.timeout(20)
        print >> sys.stderr, str(
            timezone.now()) + " Start Word Cloud Generation: " + word_list
        response = Unirest.post(
            "https://gatheringpoint-word-cloud-maker.p.mashape.com/index.php",
            headers={"X-Mashape-Key": mashape_key},
            params={
                "config": "n/a",
                "height": 500,
                "textblock": word_list,
                "width": 800
            })
        print >> sys.stderr, str(
            timezone.now()) + " Finish Word Cloud Generation: " + word_list
        if response.code == 200:
            return save_url(response.body['url'], 'media/wordcloud_images')
    return None
Example no. 30
def callDownloadApk():
    '''
    URL trimming:
    url = request._get_body_string().split('=')
    requestBody = url[1]
    :return:
    '''

    #Setting timeout as unirest calls get timed out because analysis takes time
    unirest.timeout(600000)
    #requestBody = request._get_body_string()
    preference = request.forms.get('preference')
    requestBody = request.forms.get('packageName')

    # #Config reading
    # if platform.system().lower() == "windows":
    #     db_config_file = os.path.join(os.path.dirname(sys.executable), 'androbugs-db.cfg')
    # else:
    #     db_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'androbugs-db.cfg')
    #
    # if not os.path.isfile(db_config_file):
    #     print("[ERROR] AndroBugs Framework DB config file not found: " + db_config_file)
    #     traceback.print_exc()
    #
    # configParser = SafeConfigParser()
    # configParser.read(db_config_file)

    # downloadPath=configParser.get('General_Config', 'DownloadSciptPath')
    directory = os.getcwd() + "/Download/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    downloadPath = os.getcwd() + "/googleplay-api/"

    #Calling the download apk method
    cmd = 'python ' + downloadPath + 'download.py ' + requestBody
    logging.info("cmd is: " + cmd)
    os.system(cmd)
    #responseBase = unirest.post("http://localhost:8080/analyseApk", headers={ "Accept": "application/json" },
    #body={requestBody})
    return callAnalyseApk(requestBody + ".apk", preference)
Example no. 31
def callDownloadApk():
    '''
    URL trimming:
    url = request._get_body_string().split('=')
    requestBody = url[1]
    :return:
    '''

    #Setting timeout as unirest calls get timed out because analysis takes time
    unirest.timeout(600000)
    #requestBody = request._get_body_string()
    preference = request.forms.get('preference')
    requestBody = request.forms.get('packageName')

    # #Config reading
    # if platform.system().lower() == "windows":
    #     db_config_file = os.path.join(os.path.dirname(sys.executable), 'androbugs-db.cfg')
    # else:
    #     db_config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'androbugs-db.cfg')
    #
    # if not os.path.isfile(db_config_file):
    #     print("[ERROR] AndroBugs Framework DB config file not found: " + db_config_file)
    #     traceback.print_exc()
    #
    # configParser = SafeConfigParser()
    # configParser.read(db_config_file)

    # downloadPath=configParser.get('General_Config', 'DownloadSciptPath')
    directory = os.getcwd() +"/Download/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    downloadPath = os.getcwd() + "/googleplay-api/"

    #Calling the download apk method
    cmd = 'python ' + downloadPath + 'download.py ' + requestBody
    logging.info("cmd is: " + cmd)
    os.system(cmd)
    #responseBase = unirest.post("http://localhost:8080/analyseApk", headers={ "Accept": "application/json" },
    #                            body={requestBody})
    return callAnalyseApk(requestBody + ".apk", preference)
Example no. 32
    def upload(access_token, file_path, field_extract=True):
        """Request a document from the Signnow API  by document id using unirest

        Args:
            access_token (str): The access token for a user account you want to upload the document to.
            file_path (str): The file path to the document you want to upload to signnow.
            field_extract (bool): A boolean indicating whether or not to extract field tags in the document.

        Returns:
            dict: The JSON response from the API which includes the id of the document uploaded.
                or the error returned.
        """
        timeout(60)
        response = post(Config().get_base_url() + '/document', headers={
            "Authorization": "Bearer " + access_token
        }, params={
            "file": open(file_path, mode="r"),
            "client_timestamp": datetime.now().strftime("%s"),
            "check_fields": field_extract
        })

        return response.body
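A hypothetical call to the helper above; the access token and file path are placeholders:

result = upload("ACCESS_TOKEN", "/tmp/agreement.pdf", field_extract=True)
print result.get("id", result)  # document id on success, error payload otherwise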
Example no. 33
    def upload_with_complex_tags(access_token, file_path, payload):
        """Request a document from the Signnow API  by document id using unirest

        Args:
            access_token (str): The access token for a user account you want to upload the document to.
            file_path (str): The file path to the document you want to upload to signnow.
            payload (list(dict)): A list of the tag data dicts to build the document with
            format:
            {
                "tag_name": "DateofBirth",
                "role":"COAPP",
                "label":"Date_of_Birth",
                "required":true,
                "type":"text",
                "prefilled_text":"something",
                "height":15,
                "width":100,
                "validator_id":"13435fa6c2a17f83177fcbb5c4a9376ce85befeb"
            }

        Returns:
            dict: The JSON response from the API which includes the id of the document uploaded.
                or the error returned.
        """
        timeout(60)
        response = post(
            Config().get_base_url() + "/document/fieldextract",
            headers={
                "Authorization": "Bearer " + access_token,
                "Content-Type": "multipart/form-data",
            },
            params={
                "file": open(file_path, mode="r"),
                "client_timestamp": datetime.now().strftime("%s"),
                "Tags": dumps(payload),
            },
        )

        return response.body
Example no. 34
def generate_wordcloud(word_list):
    word_cloud_key = os.environ.get('XMASHAPEKEY')
    if word_cloud_key is not None:
        timeout = 25
        Unirest.timeout(timeout)
        word_list = word_list.lower()
        fixed_asp = "FALSE"
        rotate = "FALSE"
        word_count = len(word_list.split())
        if word_count < 20:
            fixed_asp = "TRUE"
            rotate = "TRUE"
        print >> sys.stderr, str(timezone.now()) + " Start Word Cloud Generation: " + word_list
        response = Unirest.post("https://www.teamtempapp.com/wordcloud/api/v1.0/generate_wc",
                                headers={"Content-Type": "application/json", "Word-Cloud-Key": word_cloud_key},
                                params=json.dumps({"textblock": word_list, "height": 500, "width": 600, "s_fit": "TRUE",
                                                   "fixed_asp": fixed_asp, "rotate": rotate})
                                )
        print >> sys.stderr, str(timezone.now()) + " Finish Word Cloud Generation: " + word_list
        if response.code == 200:
            return save_url(response.body['url'], 'wordcloud_images')
    return None
Example no. 35
    def upload(access_token, file_path, field_extract=True):
        """Request a document from the Signnow API  by document id using unirest

        Args:
            access_token (str): The access token for a user account you want to upload the document to.
            file_path (str): The file path to the document you want to upload to signnow.
            field_extract (bool): A boolean indicating whether or not to extract field tags in the document.

        Returns:
            dict: The JSON response from the API which includes the id of the document uploaded.
                or the error returned.
        """
        timeout(60)
        response = post(Config().get_base_url() + '/document',
                        headers={"Authorization": "Bearer " + access_token},
                        params={
                            "file": open(file_path, mode="r"),
                            "client_timestamp": datetime.now().strftime("%s"),
                            "check_fields": field_extract
                        })

        return response.body
Example no. 36
def listen_ack_etcd(client, user_key, platform, queue, ack_num):
    print "listen_ack_etcd: listen for ack_num: " + ack_num + " from platform: " + platform
    # counter = 0
    receive_result = {platform: [False]}

    try:
        url = "http://" + etcd_member[
            0] + ":4001/v2/keys" + user_key + "?wait=true"
        print "listen_ack_etcd: wait url: " + url
        unirest.timeout(10000)
        etcd_response = unirest.get(url)
        new_item = etcd_response.body['node']
        print "listen_ack_etcd: platorm: " + platform + " New Key: " + new_item[
            'key'] + " New Value: " + new_item['value']
        if new_item['value'] == ack_num:
            receive_result[platform][0] = True
        else:
            receive_result[platform][0] = False
    # To catch the timeout exception from unirest when the cloud does not answer
    except Exception:
        print "listen_ack_etcd: Timeout exception platform: " + platform
        receive_result[platform][0] = False
Example no. 37
def getDatas(spider, start, offset):
    rtv = {}
    url = 'http://192.168.100.3:5000/data?start=' + str(start) + '&offset=' + str(offset) + '&spider=' + spider
    unirest.timeout(180)
    res = unirest.get(url, headers=Headers)

    if res.body['code'] != 200:
        return False

    rtv['spider'] = spider
    rtv['start'] = start
    rtv['offset'] = offset
    rtv['datas'] = []
    for item in res.body['data']:
        tmpRtv = {}
        if 'ISBN' in item['data']:
            tmpRtv['ISBN'] = item['data']['ISBN']
        else:
            continue
        # book purchase source (key: 书籍购买来源)
        tmpRtv['data'] = item['data'][u'\u4e66\u7c4d\u8d2d\u4e70\u6765\u6e90']
        rtv['datas'].append(tmpRtv)
    return rtv
Example no. 38
def updateTwits():
    def bs(record):
        if record is None:
            return 0
        else:
            return 1 if record['basic'] == "Bullish" else -1
    print("Updating Twits!!")
    with open('static/sp100.json', 'rb') as f:
        ls = json.load(f)
        url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?access_token={}"
        for i in ls:
            unirest.timeout(200)
            response = unirest.get(url.format(
                i['name'], ACCESS_TOKEN))
            data = response.body
            msgs = data['messages']
            # print("Updating", i['name'])
            # print(db.twits.count())
            for msg in msgs:
                time = datetime.strptime(
                    msg['created_at'], "%Y-%m-%dT%H:%M:%SZ")
                utc = pytz.utc
                item = {
                    'name': msg['user']['username'],
                    'body': msg['body'],
                    'id': msg['id'],
                    'time': utc.localize(time).astimezone(pytz.timezone('US/Eastern')).strftime('%Y-%m-%d %H:%M:%S'),
                    'symbols': [s['symbol'] for s in msg['symbols']],
                    'reshares': msg['reshares']['reshared_count'],
                    'bs': bs(msg['entities']['sentiment'])}
                try:
                    db.twits.replace_one(item, item, True)
                except pymongo.errors.DuplicateKeyError:
                    pass

    print('Collection Twits Updated.')
    return Response('Collection Twits Updated.')
Example no. 39
def callDownloadApk():
    '''
    URL trimming:
    url = request._get_body_string().split('=')
    requestBody = url[1]
    :return:
    '''

    #Setting timeout as unirest calls get timed out because analysis takes time
    unirest.timeout(600000)
    requestBody = request._get_body_string()

    #Config reading
    if platform.system().lower() == "windows":
        db_config_file = os.path.join(os.path.dirname(sys.executable),
                                      'androbugs-db.cfg')
    else:
        db_config_file = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'androbugs-db.cfg')

    if not os.path.isfile(db_config_file):
        print("[ERROR] AndroBugs Framework DB config file not found: " +
              db_config_file)
        traceback.print_exc()

    configParser = SafeConfigParser()
    configParser.read(db_config_file)

    downloadPath = configParser.get('General_Config', 'DownloadSciptPath')

    #Calling the download apk method
    cmd = 'python ' + downloadPath + 'download.py ' + requestBody
    print "cmd is: " + cmd
    os.system(cmd)
    #responseBase = unirest.post("http://localhost:8080/analyseApk", headers={ "Accept": "application/json" },
    #body={requestBody})
    return callAnalyseApk(requestBody + ".apk")
Example no. 40
    def __init__(self, options=None, **kwargs):
        global CLIENT_ID, CLIENT_SECRET, BASE_URL
        dicts = [options or {}, kwargs]

        for d in dicts:
            for k, v in d.iteritems():
                kwargs.setdefault(k, v)

        if len(kwargs):
            for k, v in kwargs.iteritems():
                setattr(self, k, v)

            if "client_id" in kwargs:
                CLIENT_ID = kwargs["client_id"]
            if "client_secret" in kwargs:
                CLIENT_SECRET = kwargs["client_secret"]
            if "base_url" in kwargs:
                BASE_URL = kwargs["base_url"]
            if "timeout" in kwargs:
                unirest.timeout(kwargs["timeout"])
        else:
            self.client_id = CLIENT_ID
            self.client_secret = CLIENT_SECRET
            self.base_url = BASE_URL
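A hypothetical construction of the client above (the class name ApiClient and the credential values are placeholders); note that the timeout kwarg is forwarded to unirest's module-wide setting:

client = ApiClient(options={"base_url": "https://api.example.com"},
                   client_id="my-client-id",
                   client_secret="my-client-secret",
                   timeout=15)  # also calls unirest.timeout(15)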
Example no. 41
    parser.add_argument("--debug",
                        help="debug mode, even more verbose",
                        action="store_true")
    parser.add_argument("--mctsmoms",
                        help="number of MCTS mothers",
                        type=int,
                        default=0)
    args = parser.parse_args()

    if args.verbose:
        print "verbose mode\nargs={}".format(args)

    #
    # http defaults
    #
    unirest.timeout(30)
    unirest.default_header("Accept", "application/json")

    #
    # Clear the database?
    #
    if args.cleardb:
        unirest.timeout(600)
        exec_http_get("{}/module/testing/clearDatabase".format(args.server))
        unirest.timeout(30)
        exec_http_get("{}/module/testing/createSubscriptionPacks".format(
            args.server))
        import_region_domain_data("state")
        import_region_domain_data("circle")
        import_region_domain_data("district")
        import_domain_data("languageLocationCode/import",
Example no. 42
def connect_to_db():
    try:
        return psql.connect(
            "dbname='fooddatabase' user='******' host='localhost' password='******' port='5433'"
        )
    except Exception:
        e = traceback.format_exc(0)
        print e
        print "unable to connect"
        exit(1)


for i in range(1, 4000):
    print "{} requests".format(i)
    try:
        unirest.timeout(10)
        response = unirest.get(
            "https://spoonacular-recipe-food-nutrition-v1.p.mashape.com/recipes/random?limitLicense=false&number=1",
            headers={
                "X-Mashape-Key":
                "BuyjFV6xLqmshAVbK0ppDXmdXM0Jp1KsUhYjsnltPjvvB9mODp",
                "X-Mashape-Host":
                "spoonacular-recipe-food-nutrition-v1.p.mashape.com"
            })
        filename = 'json_objects/{}.json'.format(
            re.sub(r'[-:. ]', '', str(datetime.datetime.now())))
        # a = json.loads(response.body);
        data = json.loads(response.raw_body)

        # Update local file with json
        with open(filename, 'w') as f:
            json.dump(data, f)
    except Exception:
        continue  # skip failed requests and keep sampling
Example no. 43
    def spider_closed(self, spider):
        """
        Put visitedurldict, datadict, filedict,  deadurldict to Master.
        Format:
        visitedurldict['urls'] = [ {'url':'', 'spider':self.name},  {'url':'', 'spider':self.name} ]

        datadict['datas']      = [ {'url':'', 'data':{}, 'spider':self.name},  {'url':'', 'data':{}, 'spider':self.name} ]

        filedict['files']      = [ {'url':'', 'head':'', 'body':'', 'spider':self.name},  {'url':'', 'head':'', 'body':'', 'spider':self.name} ]

        deadurldict['urls']    = [ {'url':'', 'spider':self.name},  {'url':'', 'spider':self.name} ]
        """
        lenOfdeadUrls = len(self.deadurldict['urls'])
        logging.info('spidername ' + self.name + '!!!')
        logging.info('visitedurls' + str(len(self.visitedurldict['urls'])))
        logging.info('datadict   ' + str(len(self.datadict['datas'])))
        logging.info('filedict   ' + str(len(self.filedict['files'])))
        logging.info('deadurls   ' + str(len(self.deadurldict['urls'])))

        if (lenOfdeadUrls==10):
            unirest.timeout(180)
            resdeadurl = unirest.put(
                            "http://192.168.100.3:5000/deadurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.deadurldict)
                        )

        elif(lenOfdeadUrls==0):
            unirest.timeout(180)
            resvisitedurl = unirest.put(
                            "http://192.168.100.3:5000/visitedurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.visitedurldict)
                        )
            unirest.timeout(180)
            resdata = unirest.put(
                            "http://192.168.100.3:5000/data",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.datadict)
                         )
            unirest.timeout(180)
            resfile = unirest.put(
                            "http://192.168.100.3:5000/file",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.filedict)
                         )

        else:  # 0 < lenOfdeadUrls < 10
            unirest.timeout(180)
            resvisitedurl = unirest.put(
                            "http://192.168.100.3:5000/visitedurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.visitedurldict)
                        )
            unirest.timeout(180)
            resdata = unirest.put(
                            "http://192.168.100.3:5000/data",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.datadict)
                         )
            unirest.timeout(180)
            resfile = unirest.put(
                            "http://192.168.100.3:5000/file",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.filedict)
                         )
            unirest.timeout(180)
            resdeadurl = unirest.put(
                            "http://192.168.100.3:5000/deadurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.deadurldict)
                        )
Example no. 44
    return ''.join(saved_chars)


with open(r'F:\pROJECTS\scopus\files\CORE_2014.csv', 'rb') as csv_file:
    reader = csv.reader(csv_file, delimiter=',')
    for row in reader:
        conference_title = row[1]
        remove_text_inside_brackets(conference_title)
        queryString = 'conf(%s)' % conference_title
        url = 'https://api.elsevier.com:443/content/search/scopus?query=%s&apiKey=%s' % (
            queryString, SCOPUS_API_KEY)
        list_of_requests.append(url)

import json


def callback_function(response):
    if response.code != 200:
        print json.loads(
            response.raw_body)['service-error']['status']['statusText']
    else:
        print json.loads(
            response.raw_body)['search-results']['entry'][0]['dc:title']


unirest.timeout(25)
for url in list_of_requests:
    thread = unirest.get(url,
                         headers={"Accept": "application/json"},
                         callback=callback_function)
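When a callback is supplied, unirest runs the request asynchronously and returns the worker thread, which is why the loop variable is named thread. A sketch of waiting for all requests before the script exits, under that assumption:

threads = [unirest.get(url,
                       headers={"Accept": "application/json"},
                       callback=callback_function)
           for url in list_of_requests]
for t in threads:
    t.join()  # block until every callback has fired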
Example no. 45
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import httplib2
import unirest

from common import log
from common import conf

LOG = log.getLogger(__name__)
CONF = conf.read_conf()

unirest.timeout(60)


class OfpmClient:
    def set_flow(self, dpid, inPort, srcMac, dstMac):
        header = {'Content-type': 'application/json'}
        body = {
            'dpid': "{:0>16x}".format(dpid),
            'inPort': inPort,
            'srcMac': srcMac,
            'dstMac': dstMac
        }
        LOG.debug("body = " + str(body))
        LOG.info("Request setFlow body = " + str(body))
        res = unirest.post(CONF.ofpm_set_flow_url,
                           headers=header,
                           params=str(body),
                           callback=self.__http_response__)
Example no. 46
# Kavitha Madhavaraj #

from bs4 import BeautifulSoup
import json
import requests
from requests import Session
import time
import string
import unirest
import httplib

links = []
unirest.timeout(20)


def extractKeywords(data):
    words = data.split(",")
    return words


def find_metadata(url):
    try:
        r = requests.get(url)
    except requests.exceptions.ConnectionError as e:
        yield None
        yield None
        return  # r is undefined after a connection error
    data = r.text
    soup = BeautifulSoup(data, "html.parser")
    if soup.find(attrs={"name": "author"}):
        yield unicode(soup.find(attrs={"name": "author"})['content']).replace(
            "\r", " ").replace("\n",
Example no. 47
import unirest
import json

if __name__ == '__main__':

    #!< amazonURL.json
    with open('./amazonURL.json', 'rb') as f5:
        uvdict = json.load(f5)

    uvlist = uvdict['urls']
    print len(uvlist), uvlist[0]

    unirest.timeout(180)
    resunvisitedurl = unirest.put(
                    "http://192.168.100.3:5000/unvisitedurls",
                    headers={ "Accept": "application/json", "Content-Type": "application/json" },
                    params=json.dumps(uvdict)
                )
Example no. 48
	if count==1:
		count=jatha-1
		for v in range(0,jatha):
			roll_no=start_roll_no+v
			url='http://oa.cc.iitk.ac.in:8181/Oa/Jsp/OAServices/IITk_SrchRes1.jsp?typ=stud&numtxt='+str(roll_no)+'&sbm=Y'
			unirest.get(url,callback=callme)
			print 'init ', roll_no, v
	elif count<total:
		roll_no=start_roll_no+count
		url='http://oa.cc.iitk.ac.in:8181/Oa/Jsp/OAServices/IITk_SrchRes1.jsp?typ=stud&numtxt='+str(roll_no)+'&sbm=Y'
		unirest.get(url,callback=callme)
		print 'sent for', roll_no

start_roll_no=input('Enter first roll no:')
last_roll_no=input('Enter last roll no:')
file_name=raw_input('File in which data should be stored [default is data.json]:')
if file_name=='':
	file_name='data.json'
elif len(file_name.split('.'))<2:
	file_name+='.json'

unirest.timeout(50000)

if start_roll_no>last_roll_no:
	tmp=start_roll_no
	start_roll_no=last_roll_no
	last_roll_no=tmp
total = last_roll_no - start_roll_no+1
you_handle()

# print 'fetching data of '+str(last_roll_no - start_roll_no+1)+' persons'
Example no. 49
                    count[kind] = 0
                break
        else:  # character is not a bracket
            if not any(count):  # outside brackets
                saved_chars.append(character)
    return ''.join(saved_chars)


with open(r'F:\pROJECTS\scopus\files\CORE_2014.csv', 'rb') as csv_file:
    reader = csv.reader(csv_file, delimiter=',')
    for row in reader:
        conference_title = row[1]
        remove_text_inside_brackets(conference_title)
        queryString = 'conf(%s)' % conference_title
        url = 'https://api.elsevier.com:443/content/search/scopus?query=%s&apiKey=%s' % (queryString, SCOPUS_API_KEY)
        list_of_requests.append(url)

import json


def callback_function(response):
    if response.code != 200:
        print json.loads(response.raw_body)['service-error']['status']['statusText']
    else:
        print json.loads(response.raw_body)['search-results']['entry'][0]['dc:title']


unirest.timeout(25)
for url in list_of_requests:
    thread = unirest.get(url, headers={"Accept": "application/json"}, callback=callback_function)
Example no. 50
# coding:utf-8
__author__ = 'guoling'
import sys
import os
import logging
import json

import unirest

import config
unirest.timeout(45)

reload(sys)
sys.setdefaultencoding('utf-8')
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'


class AsyncQueryTools:
    def __init__(self):
        self._logger = logging.getLogger('default')

    # Not sure about this method
    def get_db_data(self, zzj_raw, pro_name, out_param_type_list):
        """
        Get the result set returned by the stored-procedure call.
        :param zzj_raw:
        :param pro_name:
        :return:
        """
        local_data = None
        if zzj_raw["code"] == 0:
Example no. 51
#Read in reviews from API.

# How do we use sentiment analysis?  (Determine the subject(predicate) for each sentence.)  (Determine sentiment for that subject.)  - Figure out how connected certain words are.

import unirest

unirest.timeout(45)

url = "https://byroredux-metacritic.p.mashape.com/user-reviews?page_count=5&url=http%3A%2F%2Fwww.metacritic.com%2Fgame%2F"
url2 = "https://byroredux-metacritic.p.mashape.com/user-reviews?page_count=5&url=";
games_to_test = []#['Minecraft', 'World of Warcraft', 'Diablo III', 'Half-Life 2', 'Starcraft', 'The Sims 3', 'Guild Wars', 'Myst', 'Riven', 'Far Cry', 'The Witcher', 'Spore', 'Quake', 'American McGee\'s Alice', 'Dungeon Siege', 'Duke Nukem', 'BioShock', 'Frogger', 'Hotel Giant']


reviewers = {}
games = {}

def loadGames(file):
	import json
	with open(file, 'r') as f:
		temp = json.loads(f.read())
	for g in temp:
		games_to_test.append(g[0])


#New import
def addReviewersNonSelective(game, game_url, name):
	response = unirest.get(url2 + game_url,
	  headers={
	    "X-Mashape-Key": "Ug4hutuQNzmshzdMN8dNqV6v7Yi8p10pmmejsnKJl5NdrIzRMP"
	  }
	)
Example no. 52
    def spider_closed(self, spider):
        """
        Put visitedurldict, datadict, filedict,  deadurldict to Master.
        Format:
        visitedurldict['urls'] = [ {'url':'', 'spider':'douban'},  {'url':'', 'spider':'douban'} ]

        datadict['datas']      = [ {'url':'', 'data':{}, 'spider':'douban'},  {'url':'', 'data':{}, 'spider':'douban'} ]

        filedict['files']      = [ {'url':'', 'head':'', 'body':'', 'spider':'douban'},  {'url':'', 'head':'', 'body':'', 'spider':'douban'} ]

        deadurldict['urls']    = [ {'url':'', 'spider':'douban'},  {'url':'', 'spider':'douban'} ]
        """
        #scrapy crawl douban -a url='http://192.168.100.3:5000/unvisitedurls?start=0&offset=10&spider=douban'
        lenOfdeadUrls = len(self.deadurldict['urls'])
        logging.info('spider name:' + self.name                             )
        logging.info('visitedurls:' + str(len(self.visitedurldict['urls'])) )
        logging.info('datadict   :' + str(len(self.datadict['datas']))      )
        logging.info('filedict   :' + str(len(self.filedict['files']))      )
        logging.info('deadurls   :' + str(len(self.deadurldict['urls']))    )

        if (lenOfdeadUrls==10):
            unirest.timeout(180)
            resdeadurl = unirest.put(
                            "http://192.168.100.3:5000/deadurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.deadurldict)
                        )

        elif(lenOfdeadUrls==0):
            unirest.timeout(180)
            resvisitedurl = unirest.put(
                            "http://192.168.100.3:5000/visitedurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.visitedurldict)
                        )

            unirest.timeout(180)
            resdata = unirest.put(
                            "http://192.168.100.3:5000/data",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.datadict)
                         )
            unirest.timeout(180)
            resfile = unirest.put(
                            "http://192.168.100.3:5000/file",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.filedict)
                         )

        else:  # 0 < lenOfdeadUrls < 10
            unirest.timeout(180)
            resvisitedurl = unirest.put(
                            "http://192.168.100.3:5000/visitedurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.visitedurldict)
                        )
            unirest.timeout(180)
            resdata = unirest.put(
                            "http://192.168.100.3:5000/data",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.datadict)
                         )
            unirest.timeout(180)
            resfile = unirest.put(
                            "http://192.168.100.3:5000/file",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.filedict)
                         )
            unirest.timeout(180)
            resdeadurl = unirest.put(
                            "http://192.168.100.3:5000/deadurls",
                            headers={ "Accept": "application/json", "Content-Type": "application/json" },
                            params=json.dumps(self.deadurldict)
                        )
Example no. 53
import httplib2
import unirest

from common import log
from common import conf

LOG = log.getLogger(__name__)
CONF = conf.read_conf()

unirest.timeout(60)

class OfpmClient:

	def set_flow(self, dpid, inPort, srcMac, dstMac):
		dpidStr = (hex(dpid))
		header = {'Content-type':'application/json'}
		body = {'dpid':dpidStr[2:], 'inPort':inPort, 'srcMac':srcMac, 'dstMac':dstMac}
		LOG.debug("body = " + str(body))
		LOG.info("Request setFlow body = " + str(body))
		res = unirest.post(CONF.ofpm_set_flow_url, headers=header, params=str(body), callback=self.__http_response__)
		return 

	def init_flow(self, dpid):
		dpidStr = (hex(dpid))
		header = {'Content-type':'application/json'}
		body = {'datapathId':dpidStr[2:]}
		LOG.debug("body = " + str(body))
		LOG.info("Request initFlow body = " + str(body))
		res = unirest.post(CONF.ofpm_init_flow_url, headers=header, params=str(body), callback=self.__http_response__)
		return
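One caveat in both copies of OfpmClient: params=str(body) serializes the dict with Python's repr (single quotes), which is not valid JSON despite the application/json header. If the OFPM endpoint expects real JSON, json.dumps is the safer serialization; a sketch of the call inside set_flow under that assumption:

import json

res = unirest.post(CONF.ofpm_set_flow_url,
                   headers={'Content-type': 'application/json'},
                   params=json.dumps(body),  # valid JSON, unlike str(body)
                   callback=self.__http_response__)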
Example no. 54
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
import unirest
from matplotlib import rcParams

unirest.timeout(15)  # 15s timeout

RAPIDAPI_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
RAPIDAPI_HOST = "apidojo-yahoo-finance-v1.p.rapidapi.com"

symbol_string = ""
inputdata = {}


def fetchStockData(symbol):

    response = unirest.get(
        "https://apidojo-yahoo-finance-v1.p.rapidapi.com/market/get-charts?region=US&lang=en&symbol="
        + symbol + "&interval=1d&range=3mo",
        headers={
            "X-RapidAPI-Host": RAPIDAPI_HOST,
            "X-RapidAPI-Key": RAPIDAPI_KEY,
            "Content-Type": "application/json"
        })

    if response.code == 200:
        return response.body
    else:
        return None
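
The imports above pull in pandas and seaborn, but the plotting code is not part of this excerpt. A follow-on sketch; the payload layout (chart.result[0].timestamp plus indicators.quote[0].close) is an assumption about this Yahoo Finance endpoint, not something shown in the original:

inputdata = fetchStockData("AAPL")  # sample symbol
if inputdata is not None:
    result = inputdata["chart"]["result"][0]  # assumed payload layout
    df = pd.DataFrame({
        "date": [datetime.utcfromtimestamp(t) for t in result["timestamp"]],
        "close": result["indicators"]["quote"][0]["close"],
    })
    # One point per trading day over the 3-month range requested above.
    sns.lineplot(x="date", y="close", data=df)
    plt.show()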
Example n. 55
0
def getData():
    #Importing the packages
    from bs4 import BeautifulSoup
    import unirest
    #Import read and modify cloudant database functions defined in db_action.py
    from db_action import dbLoadNewArticle,getLastSavedDbState
   
    firstTime = True
    hasBeenUpdated = False
    page_number=1  
    #Modify unirest HTTP request timeout for slow connection purposes
    unirest.timeout(90)
    try:
        #Make HTTP request to phantomjs cloud through the rapidapi platform by providing phantomjs cloud api key, rapidapi key and the target URL
        response = unirest.get("https://phantomjscloud.p.rapidapi.com/single/browser/v1/your-phantomjscloud-api-key/?requestType=text&targetUrl="+"https://mwebantu.news/category/business/",
                headers={
                    "X-RapidAPI-Key": "your-rapidapi-key"
                }
            )
        #Parse the response to HTML using lxml parser.
        #Create Beautifulsoup object to navigate the HTML tags of the parsed response
        soup_obj1 = BeautifulSoup(response.body,"lxml")
        #Create loop to scrape data from the first page and second page respectively
        while(page_number<=2):
            #Get the div tag holding all the news articles
            news_articles_container = soup_obj1.find('div',{'class':'td-ss-main-content'})
            #Get a list of all the news articles in the main content div tag
            news_articles_list = news_articles_container.find_all('div',{'class':'td_module_10 td_module_wrap td-animation-stack'})
            #Loop through the articles list 
            for news_article in news_articles_list:
                #Check whether the database has been modified, and if so fetch its most recent state
                if firstTime or hasBeenUpdated:    
                    news_articles = getLastSavedDbState()
                    firstTime = False
                    hasBeenUpdated = False
                
                #Instantiate an empty python dictionary
                article=dict()

                #Get article data by navigating to the tags holding the data
                #Affect retrieved data to the python dictionary object 
                article["article_title"] = news_article.find('div',{'class':'item-details'}).find('a').getText().upper()
                article["article_date"] = news_article.find('time').get('datetime')
                article["article_image_url"] = news_article.find('div',{'class':'td-module-thumb'}).find('img').get('src')
                article["article_full_story_url"] = news_article.find('div',{'class':'item-details'}).find('a').get('href')
                article["article_summarised"] = news_article.find('div',{'class':'td-excerpt'}).getText()
                
                #Verify whether the current article already exists in the database.
                #If it doesn't exist, add it to the database
                if article["article_title"] not in news_articles:
                    dbLoadNewArticle(article)
                    hasBeenUpdated = True
            #Increment value of page_number by one to get data of the second page on the next loop            
            page_number+=1

            #Make HTTP request to get the second page containing news articles
            response = unirest.get("https://phantomjscloud.p.rapidapi.com/single/browser/v1/your-phantomjscloud-api-key/?requestType=text&targetUrl="+"https://mwebantu.news/category/business/page/"+str(page_number)+"/",
                headers={
                    "X-RapidAPI-Key": "your-rapidapi-key"
                }
            ) 
            #Parse the response  
            soup_obj1 = BeautifulSoup(response.body,"lxml")
        return True
    except Exception:
        return False
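
db_action.py is not shown here. A minimal in-memory stand-in that satisfies the two imports, purely illustrative (the real module reads from and writes to a Cloudant database):

# db_action.py (illustrative stub only)
_articles = {}

def getLastSavedDbState():
    # Return the titles already stored, mirroring the membership test
    # getData() performs against news_articles.
    return set(_articles.keys())

def dbLoadNewArticle(article):
    # Key articles by title, since getData() checks title membership.
    _articles[article["article_title"]] = article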
Example n. 56
0
    def test_timeout(self):
        unirest.timeout(2)
        response = unirest.get("http://google.com")
        self.assertEqual(response.code, 200)
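
A companion check that the timeout actually fires. httpbin.org/delay is a stand-in endpoint, and the exact exception unirest raises on timeout depends on its urllib2 internals, so the test only asserts that some exception escapes:

    def test_timeout_expires(self):
        unirest.timeout(1)
        # /delay/3 responds after 3 seconds, longer than the 1s timeout.
        self.assertRaises(Exception, unirest.get,
                          "http://httpbin.org/delay/3")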
Example n. 57
0
    parser.add_argument("--verbose", help="verbose mode", action="store_true")
    parser.add_argument("--cleardb", help="clear database", action="store_true")
    parser.add_argument("--debug", help="debug mode, even more verbose", action="store_true")
    parser.add_argument("--mctsmoms", help="number of MCTS mothers", type=int, default=0)
    parser.add_argument("--mctskids", help="number of MCTS children", type=int, default=0)
    parser.add_argument("--lmp", help="static LMP", action="store_true")
    parser.add_argument("--dob", help="static DOB", action="store_true")
    args = parser.parse_args()

    if args.verbose:
        print "verbose mode\nargs={}".format(args)

    #
    # http defaults
    #
    unirest.timeout(30)
    unirest.default_header("Accept", "application/json")

    #
    # Clear the database?
    #
    if args.cleardb:
        unirest.timeout(600)
        exec_http_get("{}/module/testing/clearDatabase".format(args.server))
        unirest.timeout(30)
        exec_http_get("{}/module/testing/createSubscriptionPacks".format(args.server))
        import_region_domain_data("state")
        import_region_domain_data("circleName")
        import_region_domain_data("district")
        import_domain_data("languageCode/import", "language_location_code.csv")
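
exec_http_get and import_region_domain_data are the script's own helpers and are not shown. A plausible minimal version of exec_http_get, an assumption rather than the original, consistent with how it is called above:

def exec_http_get(url):
    # Hypothetical helper: issue a GET and fail loudly on non-200 replies
    # so a broken test-data load is noticed immediately.
    response = unirest.get(url)
    if response.code != 200:
        raise RuntimeError("GET {} returned {}".format(url, response.code))
    return response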
Example n. 58
0
import unirest

mashape_auth = "YOUR_MASHAPE_API_KEY"
topics = ["economy", "gold", "TATAMOTORS", "DRREDDY"]
unirest.timeout(10)

for t in topics:
	print t
	response = unirest.get("https://stockviz.p.mashape.com/news/news", 
		params={
			"symbol": t
		},
		headers={
	    		"X-Mashape-Authorization": mashape_auth,
	    		"Accept": "application/json"
  		}
	);
	
	if response.code == 200:
		for r in response.body:
			if 'HEADLINE' in r and 'SOURCE' in r:
				print r['HEADLINE'].encode('cp850', errors='replace'), ": ", r['SOURCE']	
	else:
		print "response: ", response.code, ". skipping..."
Example n. 59
0
    def get_recipes(self):

        unique_IDs = []
        dict_keys = []
        dict_title = {}
        dict_cal = {}
        dict_prot = {}
        dict_carb = {}
        dict_fat = {}
        dict_price = {}
        dict_time = {}

        for recipe_type_index in range(len(self.recipe_types)):

            offset = random.randrange(0, 500)
            # offset was 0; made random to regenerate different results between runs
            print(offset)
            reached_length = False
            counter = 0
            while not reached_length and counter < 2:

                for i in range(1, 2):
                    offset = i * offset  #if range is bigger than one, you can get more results (one api call returns 100 recipes)
                    req_URL = self.get_URL(
                        self.recipe_types[recipe_type_index], offset)
                    print(req_URL)

                    unirest.timeout(100)  #100s timeout
                    print '************'
                    response = unirest.get(req_URL,
                                           headers={
                                               "X-Mashape-Key":
                                               settings.SPOONACULAR_KEY,
                                               "Accept": "application/json"
                                           })
                    print response.body
                    print settings.SPOONACULAR_KEY
                    json_data = response.body["results"]

                    print(len(json_data))
                    print(offset)
                    print(json_data)

                    if len(json_data) < 30:
                        offset = random.randrange(0, 500)
                        counter += 1

                    else:
                        reached_length = True
                        counter = 4

                    val_title = []
                    val_prot = []
                    val_carb = []
                    val_cal = []
                    val_fat = []
                    val_price = []
                    val_time = []

                    for d in json_data:
                        if d["id"] not in unique_IDs:
                            k = d['id']
                            unique_IDs.append(k)
                            dict_keys.append(k)
                            for key, value in d.iteritems():
                                if key == "title":
                                    v = d[key].encode('ascii', 'ignore')
                                    val_title.append(v)
                                if key == "calories":
                                    v = d[key]
                                    val_cal.append(v)
                                if key == "protein":
                                    v = int(re.findall(r'\d+', d[key])[0])
                                    val_prot.append(v)
                                if key == "carbs":
                                    v = int(re.findall(r'\d+', d[key])[0])
                                    val_carb.append(v)
                                if key == "fat":
                                    v = int(re.findall(r'\d+', d[key])[0])
                                    val_fat.append(v)
                                if key == "pricePerServing":
                                    v = round(float(d[key]) / 100, 2)
                                    val_price.append(v)
                                if key == "readyInMinutes":
                                    v = int(d[key])
                                    val_time.append(v)

            dict_cal_temp = dict(zip(dict_keys, val_cal))
            dict_prot_temp = dict(zip(dict_keys, val_prot))
            dict_carb_temp = dict(zip(dict_keys, val_carb))
            dict_fat_temp = dict(zip(dict_keys, val_fat))
            dict_title_temp = dict(zip(dict_keys, val_title))
            dict_price_temp = dict(zip(dict_keys, val_price))
            dict_time_temp = dict(zip(dict_keys, val_time))

            dict_cal[
                self.recipe_types[recipe_type_index].strip()] = dict_cal_temp
            dict_prot[
                self.recipe_types[recipe_type_index].strip()] = dict_prot_temp
            dict_carb[
                self.recipe_types[recipe_type_index].strip()] = dict_carb_temp
            dict_fat[
                self.recipe_types[recipe_type_index].strip()] = dict_fat_temp
            dict_title[
                self.recipe_types[recipe_type_index].strip()] = dict_title_temp
            dict_price[
                self.recipe_types[recipe_type_index].strip()] = dict_price_temp
            dict_time[
                self.recipe_types[recipe_type_index].strip()] = dict_time_temp

            dict_keys = []
            val_cal = []
            val_prot = []
            val_carb = []
            val_fat = []
            val_title = []
            val_price = []
            val_time = []

        return {
            "json": response.body,
            "unique_IDs": unique_IDs,
            "url": req_URL,
            "dict_cal": dict_cal,
            "dict_prot": dict_prot,
            "dict_carb": dict_carb,
            "dict_fat": dict_fat,
            "dict_price": dict_price,
            "dict_time": dict_time,
            "dict_title": dict_title,
            "recipe_types": self.recipe_types
        }
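
A usage sketch for the returned bundle; planner stands for a hypothetical instance of this class:

recipes = planner.get_recipes()
for rtype in recipes["recipe_types"]:
    key = rtype.strip()
    titles = recipes["dict_title"][key]
    calories = recipes["dict_cal"][key]
    for rid, title in titles.iteritems():
        # Print each recipe title with its calorie count, if one was parsed.
        print "{0}: {1} kcal".format(title, calories.get(rid, "?"))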