class TestEvent(unittest.TestCase):

    def setUp(self):
        self.driver = Browser()
        self.driver.goToFacebook()
        self.driver.login('*****@*****.**', 'selab1623')
        time.sleep(2)
        self.driver.goToFacebookEvent()

    def test_create_event(self):
        timestamp = str(time.time())
        self.driver.createEvent('i will take 100 score ' + timestamp)
        time.sleep(2)
        assert self.driver.isCreatedEvent('i will take 100 score ' + timestamp)

    def test_invite_friends(self):
        timestamp = str(time.time())
        self.driver.createEvent('i will take 100 score ' + timestamp)
        time.sleep(4)
        self.driver.inviteFriend()
        time.sleep(2)
        assert self.driver.isInvitedFriend()

    def tearDown(self):
        self.driver.close()
class TestComment(unittest.TestCase):

    def setUp(self):
        self.driver = Browser()
        self.driver.goToFacebook()
        self.driver.login('*****@*****.**', 'selab1623')
        self.driver.createStory('cliu is good' + str(time.time()))
        time.sleep(5)

    def test_like(self):
        self.driver.likePosts()
        time.sleep(3)
        assert self.driver.isLikePosts('SE TaipeiTech')

    def test_unlike(self):
        self.driver.likePosts()
        time.sleep(2)
        self.driver.unLikePosts()
        time.sleep(2)
        assert not self.driver.isLikePosts('SE TaipeiTech')

    def test_share(self):
        timestamp = str(time.time())
        self.driver.sharePost('ya, just share it ' + timestamp)
        self.driver.goToProfile()
        time.sleep(3)
        assert self.driver.isStoryCreated('ya, just share it ' + timestamp)

    def tearDown(self):
        self.driver.close()
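Every suite in this listing depends on a project-specific Browser page object that wraps Selenium; none of it is standard-library code. A minimal sketch of the boilerplate such a module assumes (the Browser import path is a guess, not shown in the source):

import time
import unittest

from browser import Browser  # hypothetical import path for the project's wrapper

if __name__ == '__main__':
    unittest.main(verbosity=2)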
Example #3
class Base(object):
    def __init__(self):
        self.browser = Browser()
        self.start_browser()

        self.logged_in = False

    def start_browser(self):
        self.browser.start()

    def close_browser(self):
        self.browser.close()
class TestLike(unittest.TestCase):

    def setUp(self):
        self.driver = Browser()
        self.driver.goToFacebook()
        self.driver.login('*****@*****.**', 'selab1623')

    def test_create_story(self):
        timestamp = str(time.time())
        self.driver.createStory('just for fun you know, i\'m testing all day long' + timestamp)
        time.sleep(5)
        assert self.driver.isStoryCreated('just for fun you know, i\'m testing all day long' + timestamp)


    def tearDown(self):
        self.driver.close()
class TestPhoto(unittest.TestCase):

    def setUp(self):
        self.driver = Browser()
        self.driver.goToFacebook()
        self.driver.login('*****@*****.**', 'selab1623')
        time.sleep(2)
        self.driver.goToFacebookPhotoManagement()
        time.sleep(3)

    def test_delete_photo(self):
        self.driver.deletePhotos()
        time.sleep(3)
        # no assertion: deletion success is not verified here


    def tearDown(self):
        self.driver.close()
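The fixed time.sleep() calls above make these suites slow and flaky. With plain Selenium, the usual replacement is an explicit wait; a sketch, assuming the wrapper exposes its underlying webdriver and using a hypothetical selector:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# block for up to 10 seconds until the element exists, instead of sleeping blindly
WebDriverWait(webdriver, 10).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, 'div[role="article"]')))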
class BrowserTest(unittest.TestCase):
	'''Tests basic Browser usage.'''


	def setUp(self):
		self.browser = Browser()
	# The tests below are disabled (commented out as a string literal):
	'''
	def test_browser_is_true(self):
		self.assertTrue(self.browser)

	def test_check_settings_file(self):
		self.assertTrue(self.browser.settings)


	def test_go_to_jira(self):
		self.browser.go()
		self.assertIn('JIRA', self.browser.title())
	
	def test_log_into_jira(self):
		self.browser.go()
		self.browser.log_into_jira()
		self.browser.wait().until(lambda x: x.find_element_by_link_text('Farhan Syed'))
		link_text = self.browser.webdriver.find_element_by_link_text('Farhan Syed')
		self.assertIn('Farhan', link_text.text)


	def test_navigate_to_Delta_Iphone(self):
		self.browser.go()
		self.browser.log_into_jira()
		self.browser.select_delta_iphone_dashboard()
		self.assertIn('Iphone', self.browser.title())
	'''

	def test_to_download_xsls_files(self):
		self.browser.go()
		self.browser.log_into_jira()
		self.browser.wait().until(lambda x: x.find_element_by_link_text('Farhan Syed'))
		self.browser.select_delta_iphone_dashboard()
		self.browser.download_xsls_files()
		# TODO: assert the downloaded file exists in the expected directory



	def tearDown(self):
		self.browser.close()
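The missing assertion in test_to_download_xsls_files could be filled by polling the browser's download directory; a hypothetical helper (the directory, pattern, and timeout are assumptions, not part of the original code):

import glob
import os
import time

def wait_for_download(directory, pattern='*.xls*', timeout=30):
    # poll until a matching file appears, then return the matches
    deadline = time.time() + timeout
    while time.time() < deadline:
        matches = glob.glob(os.path.join(directory, pattern))
        if matches:
            return matches
        time.sleep(0.5)
    raise AssertionError('no %s file appeared in %s' % (pattern, directory))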
class TestMessage(unittest.TestCase):

    def setUp(self):
        self.driver = Browser()
        self.driver.goToFacebook()
        self.driver.login('*****@*****.**', 'selab1623')

    def test_send_message(self):
        timestamp = str(time.time())
        self.driver.sendMessage('i am Mr.Big ' + timestamp)
        time.sleep(2)
        assert self.driver.isSendingMessage('i am Mr.Big ' + timestamp)

    def test_send_photo(self):
        self.driver.sendPhotoMessage('../image.gif')
        time.sleep(8)
        # no assertion: the upload result is not verified here

    def tearDown(self):
        self.driver.close()
Example #8
class TestBrowser(TestCase):
    def setUp(self):
        self.b = Browser()
        self.b.start()

    def tearDown(self):
        self.b.close()

    def test_can_start_browser(self):
        pass

    def test_can_get_google(self):
        self.b.get("http://www.google.com")
        self.assertEqual(self.b.title, "Google")

    def test_can_get_inputs(self):
        raise NotImplementedError

    def test_can_submit_form(self):
        raise NotImplementedError

    def test_can_go_and_come_back(self):
        raise NotImplementedError
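One way the test_can_go_and_come_back stub could be filled in, assuming the Browser wrapper forwards back() to the underlying driver (an assumption; the source does not show the wrapper's API):

    def test_can_go_and_come_back(self):
        self.b.get("http://www.google.com")
        self.b.get("http://www.example.com")
        self.b.back()  # hypothetical: assumes Browser delegates to webdriver.back()
        self.assertEqual(self.b.title, "Google")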
class TestCreateStory(unittest.TestCase):

    def setUp(self):
        self.driver = Browser()
        self.driver.goToFacebook()
        self.driver.login('*****@*****.**', 'selab1623')

    def test_create_story(self):
        timestamp = str(time.time())
        self.driver.createStory('just for fun you know, i\'m testing all day long' + timestamp)
        time.sleep(5)
        assert self.driver.isStoryCreated('just for fun you know, i\'m testing all day long' + timestamp)

    def test_create_image_story(self):
        self.driver.uploadImageStory('../image.gif', 'uh.......')
        time.sleep(7)
        assert self.driver.isUploadedImageStory('SE TaipeiTech', 'uh.......')

    def test_create_video_story(self):
        self.driver.uploadVideoStory('../video.mp4', 'yap')
        # no assertion: the upload result is not verified here

    def tearDown(self):
        self.driver.close()
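The upload tests above pass relative paths such as '../image.gif'. With raw Selenium, file uploads are normally done by sending an absolute path to the file input element; a sketch under that assumption (the selector is hypothetical, and Facebook's real DOM differs):

import os

# assumes access to the underlying Selenium webdriver; the selector is hypothetical
file_input = webdriver.find_element_by_css_selector('input[type="file"]')
file_input.send_keys(os.path.abspath('../image.gif'))  # absolute paths avoid cwd issues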
Example #10
class APIFinder:

	def __init__(self, url=None, harDirectory=None, searchString=None, removeParams=False, count=1, cookies=None):
		self.url = url
		self.harDirectory = harDirectory
		self.searchString = searchString
		self.removeParams = removeParams
		self.count = count
		self.browser = None
		self.cookies = cookies
		
	def start(self):
		if self.count > 1 and self.url is None:
			print("Cannot provide page count with no URL given")
			exit(1)
		if self.removeParams and self.url is None:
			print("WARNING: Must have Internet connection to remove unneeded parameters")

		#Scan for all APIs
		if self.url:
			os.makedirs(self.harDirectory,exist_ok=True)
			self.deleteExistingHars()
			self.browser = Browser("chromedriver/chromedriver", "browsermob-proxy-2.1.4/bin/browsermob-proxy", self.harDirectory, cookies=self.cookies)
			if self.searchString is not None:
				print("Searching URL "+self.url+" for string "+self.searchString)
			#Move recursively through the site
			apiCalls = self.crawlingScan(self.url)
			
		#Scan directory of har files
		else:
			print("Parsing existing directory of har files")
			harParser = HarParser(self.harDirectory, self.searchString, self.removeParams)
			apiCalls = harParser.parseMultipleHars()

		if self.browser is not None:
			self.browser.close()

		return apiCalls

	def openURL(self, url):
		return self.browser.get(url) #load the url in Chrome


	#tldextract compares registered domains, so "blog.example.co.uk" and
	#"www.example.co.uk" both reduce to "example" and count as internal,
	#which a naive netloc string comparison would miss
	def isInternal(self, url, baseUrl):
		if url.startswith("/"):
			return baseUrl+url
		if tldextract.extract(baseUrl).domain == tldextract.extract(url).domain:
			return url
		return None


	#Prints absolute URLs found in raw text; the regex only matches
	#.com/.org/.biz/.net hosts and the matches are not returned, so this
	#helper is purely diagnostic
	def findInternalURLsInText(self, text, currentUrl, allFoundURLs):
		regex = re.compile(r'(https?://[\w]+\.)(com|org|biz|net)((/[\w]+)+)(\.[a-z]{2,4})?(\?[\w]+=[\w]+)?((&[\w]+=[\w]+)+)?', re.ASCII)

		for match in re.finditer(regex, text):
			print(match.group())

	#Returns a list of all internal URLs on a page as long
	#as they are either relative URLs or contain the current domain name
	def findInternalURLs(self, bsObj, currentUrl, allFoundURLs):
		newUrls = []
		baseUrl = urlparse(currentUrl).scheme+"://"+urlparse(currentUrl).netloc
		#Finds all links that begin with a "/"
		for link in bsObj.findAll("a"):
			if 'href' in link.attrs:
				#baseUrl, urlInPage = parseUrl(link.attrs)
				url = link.attrs['href']
				#It's an internal URL and we haven't found it already
				url = self.isInternal(url, baseUrl)
				if url is not None and url not in newUrls and url not in allFoundURLs:
					newUrls.append(url)
					allFoundURLs.append(url)
		return allFoundURLs, newUrls


	def getContentType(self,headers):
		for header in headers:
			if header["name"] == "Content-Type":
				return header["value"]

	#Get rid of all the current har files
	def deleteExistingHars(self):
		files = os.listdir(self.harDirectory)
		for singleFile in files:
			if "har" in singleFile:
				os.remove(self.harDirectory+"/"+singleFile)


	#Performs a recursive crawl of a site, searching for APIs
	def crawlingScan(self, url, apiCalls=None, allFoundURLs=None):
		#Mutable defaults would be shared across calls, so create fresh lists here
		if apiCalls is None:
			apiCalls = []
		if allFoundURLs is None:
			allFoundURLs = []

		self.count = self.count - 1
		if self.count < 0:
			return apiCalls

		harParser = HarParser(self.harDirectory, searchString=self.searchString, removeParams=self.removeParams)

		#If uncommented, will return as soon as a matching call is found
		#if self.searchString is not None and len(apiCalls) > 0:
		#	return apiCalls
		try:
			print("Scanning URL: "+url)
			html = self.openURL(url)
			if html is not None:
				bsObj = BeautifulSoup(html, "lxml")

				harObj = harParser.getSingleHarFile()
				apiCalls = harParser.scanHarfile(harObj, apiCalls=apiCalls)

				allFoundURLs, newUrls = self.findInternalURLs(bsObj, url, allFoundURLs)
				shuffle(newUrls)
				
				for newUrl in newUrls:
					self.crawlingScan(newUrl, apiCalls, allFoundURLs)
		
		except (KeyboardInterrupt, SystemExit):
			print("Stopping crawl")
			self.browser.close()
			apiWriter = APIWriter(apiCalls)
			apiWriter.outputAPIs()
			exit(1)
		return apiCalls
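A minimal usage sketch for the class above, assuming HarParser, APIWriter and the project's Browser wrapper are importable alongside it (the URL is a placeholder):

finder = APIFinder(url='https://example.com', harDirectory='hars', count=5)
apiCalls = finder.start()  # crawls up to 5 pages and returns the API calls found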
Example #11
class APIFinder:
    def __init__(self,
                 url=None,
                 harDirectory=None,
                 searchString=None,
                 removeParams=False,
                 count=1,
                 cookies=None):
        self.url = url
        self.harDirectory = harDirectory
        self.searchString = searchString
        self.removeParams = removeParams
        self.count = count
        self.browser = None
        self.cookies = cookies

    def start(self):
        if self.count > 1 and self.url is None:
            print("Cannot provide page count with no URL given")
            exit(1)
        if self.removeParams and self.url is None:
            print(
                "WARNING: Must have Internet connection to remove unneeded parameters"
            )

        #Scan for all APIs
        if self.url:
            os.makedirs(self.harDirectory, exist_ok=True)
            self.deleteExistingHars()
            self.browser = Browser(
                "chromedriver/chromedriver",
                "browsermob-proxy-2.1.4/bin/browsermob-proxy",
                self.harDirectory,
                cookies=self.cookies)
            if self.searchString is not None:
                print("Searching URL " + self.url + " for string " +
                      self.searchString)
            #Move recursively through the site
            apiCalls = self.crawlingScan(self.url)

        #Scan directory of har files
        else:
            print("Parsing existing directory of har files")
            harParser = HarParser(self.harDirectory, self.searchString,
                                  self.removeParams)
            apiCalls = harParser.parseMultipleHars()

        if self.browser is not None:
            self.browser.close()

        return apiCalls

    def openURL(self, url):
        return self.browser.get(url)  #load the url in Chrome

    def getDomain(self, url):
        #str.lstrip('www.') strips characters from the set {w, .}, not the
        #prefix, which would mangle hosts like "web.example.com", so strip
        #the prefix explicitly
        netloc = urlparse(url).netloc
        return netloc[4:] if netloc.startswith('www.') else netloc

    def isInternal(self, url, baseUrl):
        if url.startswith("/"):
            return baseUrl + url
        if self.getDomain(baseUrl) == self.getDomain(url):
            return url
        return None

    #Prints absolute URLs found in raw text; the regex only matches
    #.com/.org/.biz/.net hosts and the matches are not returned, so this
    #helper is purely diagnostic
    def findInternalURLsInText(self, text, currentUrl, allFoundURLs):
        regex = re.compile(
            r'(https?://[\w]+\.)(com|org|biz|net)((/[\w]+)+)(\.[a-z]{2,4})?(\?[\w]+=[\w]+)?((&[\w]+=[\w]+)+)?',
            re.ASCII)

        for match in re.finditer(regex, text):
            print(match.group())

    #Returns a list of all internal URLs on a page as long
    #as they are either relative URLs or contain the current domain name
    def findInternalURLs(self, bsObj, currentUrl, allFoundURLs):
        newUrls = []
        baseUrl = urlparse(currentUrl).scheme + "://" + urlparse(
            currentUrl).netloc
        #Finds all links that begin with a "/"
        for link in bsObj.findAll("a"):
            if 'href' in link.attrs:
                #baseUrl, urlInPage = parseUrl(link.attrs)
                url = link.attrs['href']
                #It's an internal URL and we haven't found it already
                url = self.isInternal(url, baseUrl)
                if url is not None and url not in newUrls and url not in allFoundURLs:
                    newUrls.append(url)
                    allFoundURLs.append(url)
        return allFoundURLs, newUrls

    def getContentType(self, headers):
        for header in headers:
            if header["name"] == "Content-Type":
                return header["value"]

    #Get rid of all the current har files
    def deleteExistingHars(self):
        files = os.listdir(self.harDirectory)
        for singleFile in files:
            if "har" in singleFile:
                os.remove(self.harDirectory + "/" + singleFile)

    #Performs a recursive crawl of a site, searching for APIs
    def crawlingScan(self, url, apiCalls=None, allFoundURLs=None):
        #Mutable defaults would be shared across calls, so create fresh lists here
        if apiCalls is None:
            apiCalls = []
        if allFoundURLs is None:
            allFoundURLs = []

        self.count = self.count - 1
        if self.count < 0:
            return apiCalls

        harParser = HarParser(self.harDirectory,
                              searchString=self.searchString,
                              removeParams=self.removeParams)

        #If uncommented, will return as soon as a matching call is found
        #if self.searchString is not None and len(apiCalls) > 0:
        #	return apiCalls
        try:
            print("Scanning URL: " + url)
            html = self.openURL(url)
            if html is not None:
                bsObj = BeautifulSoup(html, "lxml")

                harObj = harParser.getSingleHarFile()
                apiCalls = harParser.scanHarfile(harObj, apiCalls=apiCalls)

                allFoundURLs, newUrls = self.findInternalURLs(
                    bsObj, url, allFoundURLs)
                shuffle(newUrls)

                for newUrl in newUrls:
                    self.crawlingScan(newUrl, apiCalls, allFoundURLs)

        except (KeyboardInterrupt, SystemExit):
            print("Stopping crawl")
            self.browser.close()
            apiWriter = APIWriter(apiCalls)
            apiWriter.outputAPIs()
            exit(1)
        return apiCalls
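crawlingScan initializes its list arguments inside the body rather than writing apiCalls=[] in the signature because Python evaluates default values once, at function definition time; an =[] default would be shared by every call that omits the argument. A tiny demonstration of the pitfall:

def buggy(items=[]):    # the default list is created once, when def runs
    items.append(1)
    return items

print(buggy())  # [1]
print(buggy())  # [1, 1]  <- state leaked from the previous call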