Example #1
    def test_status(self):
        """
        Test that the server can receive requests
        """

        # The server under test is expected to be listening on port 1337
        response = requests.get('http://127.0.0.1:1337')
        self.assertEqual(response.status_code, 200, 'Error with setting code')

        # Once the server has been stopped (presumably elsewhere in the
        # original suite), a second request should fail to connect
        with self.assertRaises(requests.ConnectionError):
            requests.get('http://127.0.0.1:1337')
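
This test only passes if the server on port 1337 is up for the first request and down for the second. A minimal self-contained sketch of such a test, using a throwaway http.server stand-in for the real server (an assumption; the original project presumably manages its own server):

import threading
import unittest

import requests
from http.server import BaseHTTPRequestHandler, HTTPServer


class OkHandler(BaseHTTPRequestHandler):
    # Stand-in handler that answers every GET with an empty 200
    def do_GET(self):
        self.send_response(200)
        self.end_headers()


class ServerStatusTest(unittest.TestCase):
    def test_status(self):
        server = HTTPServer(('127.0.0.1', 1337), OkHandler)
        threading.Thread(target=server.serve_forever, daemon=True).start()
        try:
            response = requests.get('http://127.0.0.1:1337')
            self.assertEqual(response.status_code, 200, 'Error with setting code')
        finally:
            server.shutdown()
            server.server_close()

        # With the server stopped, the connection should now be refused
        with self.assertRaises(requests.ConnectionError):
            requests.get('http://127.0.0.1:1337')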
Example #2
def get_stream():
    # Local import: send() forwards each tweet payload on to Telegram
    from src.telegram import send
    with requests.get(
            TWITTER_STREAM_URL,
            headers={
                "Authorization": "Bearer {}".format(TWITTER_BEARER_TOKEN)
            },
            stream=True,
            params={
                "tweet.fields":
                "attachments,author_id,created_at,source",
                "expansions":
                "author_id,attachments.media_keys",
                "media.fields":
                "duration_ms,height,media_key,preview_image_url,type,url,width"
            }) as response:
        logger.info("stream status code: {}".format(response.status_code))
        if response.status_code != 200:
            raise Exception("Cannot get stream (HTTP {}): {}".format(
                response.status_code, response.text))
        # The stream yields one JSON object per line; empty lines are keep-alives
        for response_line in response.iter_lines():
            if response_line:
                json_response = json.loads(response_line)
                if json_response.get("includes"):
                    logger.info("got new tweet from: {}".format(
                        json_response.get("includes").get('users')))
                if json_response.get("error"):
                    # An in-band error payload means the stream is no longer usable
                    raise Exception("Disconnected")
                send(json_response)
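
Because get_stream raises on any HTTP or in-band error, callers typically wrap it in a reconnect loop. A minimal sketch (run_stream_forever is a hypothetical wrapper and the back-off delay is an arbitrary assumption, not part of the original):

import time

def run_stream_forever():
    # Restart the stream whenever it drops, pausing briefly between attempts
    while True:
        try:
            get_stream()
        except Exception as exc:
            logger.warning("stream stopped: {}; reconnecting in 5s".format(exc))
            time.sleep(5)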
Example #3
    def is_commit_older_than(self, commit_url: str, older_than_days: int):
        response = requests.get(url=commit_url, headers=self.make_headers())
        if response.status_code != 200:
            raise RuntimeError(
                f'Failed to make request to {commit_url}. {response} {response.json()}'
            )

        commit: dict = response.json().get('commit', {})
        committer: dict = commit.get('committer', {})
        author: dict = commit.get('author', {})

        # Use the committer date rather than the author date: the last commit could
        # be old but only just applied, e.g. coming from a merge where the committer
        # brings in commits from other authors.
        # Fall back to the author's commit date if none is found for whatever bizarre reason
        commit_date_raw = committer.get('date', author.get('date'))
        if commit_date_raw is None:
            print(
                f"Warning: could not determine commit date for {commit_url}. Assuming it's not old enough to delete"
            )
            return False

        # Dates are formatted like so: '2021-02-04T10:52:40Z' (i.e. UTC)
        commit_date = datetime.strptime(commit_date_raw, "%Y-%m-%dT%H:%M:%SZ")

        # Compare against UTC now, since the parsed timestamp is naive UTC
        delta = datetime.utcnow() - commit_date
        print(f'Last commit was on {commit_date_raw} ({delta.days} days ago)')

        return delta.days > older_than_days
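
A hedged usage sketch for is_commit_older_than, assuming `gh` is an instance of the surrounding class and using an illustrative commit URL (both are assumptions, not part of the original):

# Hypothetical instance and commit URL; any endpoint returning the GitHub
# commit schema would work the same way
commit_url = 'https://api.github.com/repos/octocat/hello-world/commits/abc123'
if gh.is_commit_older_than(commit_url=commit_url, older_than_days=30):
    print('Stale: the last commit is more than 30 days old')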
Example #4
def get_twitter_rules(save=True):
    response = requests.get(url=TWITTER_RULES_URL, headers=HEADERS)
    rules = response.json()
    if response.status_code == 200 and save:
        # Cache each rule in Redis without overwriting values that already exist
        for rule in rules.get("data", []):
            redis_cli.hsetnx("rules", rule.get("id"), rule.get("value"))
    return rules
Example #5
    def get_default_branch(self) -> str:
        url = f'{GH_BASE_URL}/repos/{self.github_repo}'
        headers = self.make_headers()

        response = requests.get(url=url, headers=headers)

        if response.status_code != 200:
            raise RuntimeError(
                'Error: could not determine default branch. This is a big one.'
            )

        return response.json().get('default_branch')
Example #6
    def __get_video_from_v1(self):
        tweet_status = requests.get(
            TWITTER_STATUS_URL.format(self.tweet_id),
            headers={
                "Authorization": "Bearer {}".format(TWITTER_BEARER_TOKEN)
            })
        if tweet_status.status_code == 200:
            extended_entities = tweet_status.json().get("extended_entities")
            if extended_entities:
                self.media = extended_entities.get("media")
            else:
                logger.error(
                    "can't get extended_entities details: \n\t{}".format(
                        tweet_status.json()))
Example #7
    def is_pull_request_base(self, branch: str) -> bool:
        """
        Returns true if the given branch is the base for another pull request.
        """
        url = f'{GH_BASE_URL}/repos/{self.github_repo}/pulls?base={branch}'
        headers = self.make_headers()
        headers['accept'] = 'application/vnd.github.groot-preview+json'

        response = requests.get(url=url, headers=headers)
        if response.status_code != 200:
            raise RuntimeError(
                f'Failed to make request to {url}. {response} {response.json()}'
            )

        return len(response.json()) > 0
Example #8
    def has_open_pulls(self, commit_hash: str) -> bool:
        """
        Returns true if commit is part of an open pull request or the branch is the base for a pull request
        """
        url = f'{GH_BASE_URL}/repos/{self.github_repo}/commits/{commit_hash}/pulls'
        headers = self.make_headers()
        headers['accept'] = 'application/vnd.github.groot-preview+json'

        response = requests.get(url=url, headers=headers)
        if response.status_code != 200:
            raise RuntimeError(
                f'Failed to make request to {url}. {response} {response.json()}'
            )

        pull_request: dict
        for pull_request in response.json():
            if pull_request.get('state') == 'open':
                return True

        return False
Example #9
	def search_by_attribute(self, item_type, value, attribute=None, cache=False, RT_Key=False):
		from src import requests
		itemlist = []
		if item_type == 'book':
			# Search with 'attribute = None' when searching for an OLID
			value = urllib.quote(value)
			if attribute is None:
				query = "q=" + value
			elif attribute == "ISBN":
				query = "isbn=" + value
			elif attribute == "title":
				query = "title=" + value
			elif attribute == "author":
				query = "author=" + value
			else:
				logging.debug("Item.search_by_attribute() was called with an invalid attribute: %s" % attribute)
				return itemlist
			url = "http://openlibrary.org/search.json?" + query
			response = requests.get(url)
			counter = 0
			try:
				if response.status_code == 200:
					json_response = response.json()
					for book in json_response['docs']:
						if cache:
							# Check to see if Item is already in the database; if so, update that copy
							checkItem = Item.query(Item.item_key==book['key']).get()
							if checkItem:
								curItem = checkItem
								createNew = False
							else:
								createNew = True
						else:
							createNew = True
							
						if createNew:
							curItem = Item(item_key=None)
							curItem.item_type = "book"
							curItem.item_key = book['key']
							
						if 'title' in book:
							if 'subtitle' in book:
								curItem.title = book['title'] + ": " + book['subtitle']
							else:
								curItem.title = book['title']
						else:
							curItem.title = ""
					
						if 'author_name' in book:
							curItem.author = ", ".join(book['author_name'])
						else:
							curItem.author = ""
					
						if 'cover_i' in book:
							curItem.thumbnail_link = "http://covers.openlibrary.org/b/id/" + str(book['cover_i']) + "-M.jpg"
						else:
							curItem.thumbnail_link = ""
					
						curItem.last_update = datetime.now()
						if cache:
							curItem.put()
						itemlist.append(curItem.to_dict())
						counter += 1
			except Exception:
				# Swallow malformed responses and return whatever was collected so far
				pass
		elif item_type == 'movie':
			query = value
			apikey = "f4dr8ebyf9pmh4wskegrs3vt"
			logging.info("RT API Key, updated 5/10")
			if not RT_Key:
				url = "http://api.rottentomatoes.com/api/public/v1.0/movies.json?apikey=" + apikey + "&q=" + query + "&page_limit=50"
				response = requests.get(url)
				try:
					logging.info("RT Status Code: %s" %response.status_code)
					if response.status_code == 200:
						json_response = response.json()
						for movie in json_response['movies']:
							if cache:
								# Check to see if Item is already in the database; if so, update that copy
								checkItem = Item.query(Item.item_key==movie['id']).get()
								if checkItem:
									curItem = checkItem
									createNew = False
								else:
									createNew = True
							else:
								createNew = True
								
							if createNew:
								# Build itemlist of movies
								curItem = Item(item_key=None)
								curItem.item_type = "movie"
								curItem.item_key = movie['id']
							
							curItem.title = movie['title']
							if isinstance(movie.get('year',9999),(int,long)):
								curItem.year = movie.get('year',9999)
							else:
								curItem.year = 9999
							curItem.rating = movie.get('mpaa_rating',"Rating Not Available")
							curItem.description = movie.get('synopsis',"Synopsis Not Available")
							curItem.thumbnail_link = movie['posters'].get('thumbnail','')
							curItem.direct_link = movie['links'].get('alternate','')
							# To get genre, open the detail page (but only when caching, since it is slow with many movies)
							if cache:
								url = movie['links']['self'] + "?apikey=" + apikey
								response_detail = requests.get(url)
								try:
									if response_detail.status_code == 200:
										movie = response_detail.json()
										curItem.genre = ", ".join(movie['genres'])
								except Exception:
									curItem.genre = ""
								curItem.last_update = datetime.now()
								curItem.put()
							itemlist.append(curItem.to_dict())
				except Exception:
					# Swallow malformed responses and return whatever was collected so far
					pass
			else:
				# Searching for a specific Rotten Tomatoes key (Rotten Tomatoes does not send you to the direct movie link with a search query, so it's necessary to access it directly)
				url = "http://api.rottentomatoes.com/api/public/v1.0/movies/" + value + ".json?apikey=" + apikey
				response_detail = requests.get(url)
				try:
					if response_detail.status_code == 200:
						movie = response_detail.json()
						if cache:
							# Check to see if Item is already in the database; if so, update that copy
							checkItem = Item.query(Item.item_key==value).get()
							if checkItem:
								curItem = checkItem
								createNew = False
							else:
								createNew = True
						else:
							createNew = True
							
						if createNew:
							curItem = Item(item_key=None)
							curItem.item_type = "movie"
							curItem.item_key = value
							
						curItem.title = movie['title']
						if isinstance(movie.get('year',9999),(int,long)):
							curItem.year = movie.get('year',9999)
						else:
							curItem.year = 9999
						curItem.rating = movie.get('mpaa_rating',"Rating Not Available")
						curItem.description = movie.get('synopsis',"Synopsis Not Available")
						curItem.thumbnail_link = movie['posters'].get('thumbnail','')
						curItem.direct_link = movie['links'].get('alternate','')
						curItem.genre = ", ".join(movie['genres'])
						curItem.last_update = datetime.now()
						if cache:
							curItem.put()
						itemlist.append(curItem.to_dict())
				except Exception:
					pass
		return itemlist
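
A hedged usage sketch for search_by_attribute, assuming `item` is an instance of the class that defines it (the instance name is an assumption):

# Search Open Library by title; each result is a plain dict
results = item.search_by_attribute('book', 'The Hobbit', attribute='title')
for book in results:
    print(book.get('title'), '-', book.get('author'))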
Example #10
    def get_deletable_branches(self, last_commit_age_days: int,
                               ignore_branches: list) -> list:
        # Default branch might not be protected
        default_branch = self.get_default_branch()

        url = self.get_paginated_branches_url()
        headers = self.make_headers()

        response = requests.get(url=url, headers=headers)
        if response.status_code != 200:
            raise RuntimeError(
                f'Failed to make request to {url}. {response} {response.json()}'
            )

        deletable_branches = []
        branch: dict
        branches: list = response.json()
        current_page = 1

        while len(branches) > 0:
            for branch in branches:
                branch_name = branch.get('name')

                commit_hash = branch.get('commit', {}).get('sha')
                commit_url = branch.get('commit', {}).get('url')

                print(f'Analyzing branch `{branch_name}`...')

                # Immediately discard protected branches, default branch and ignored branches
                if branch_name == default_branch:
                    print(
                        f'Ignoring `{branch_name}` because it is the default branch'
                    )
                    continue

                # We're already retrieving non-protected branches from the API, but it
                # pays to be careful when dealing with third-party APIs
                if branch.get('protected') is True:
                    print(f'Ignoring `{branch_name}` because it is protected')
                    continue

                if branch_name in ignore_branches:
                    print(
                        f'Ignoring `{branch_name}` because it is on the list of ignored branches'
                    )
                    continue

                # Move on if commit is in an open pull request
                if self.has_open_pulls(commit_hash=commit_hash):
                    print(
                        f'Ignoring `{branch_name}` because it has open pulls')
                    continue

                # Move on if branch is base for a pull request
                if self.is_pull_request_base(branch=branch_name):
                    print(
                        f'Ignoring `{branch_name}` because it is the base for a pull request of another branch'
                    )
                    continue

                # Move on if last commit is newer than last_commit_age_days
                if self.is_commit_older_than(
                        commit_url=commit_url,
                        older_than_days=last_commit_age_days) is False:
                    print(
                        f'Ignoring `{branch_name}` because last commit is newer than {last_commit_age_days} days'
                    )
                    continue

                print(
                    f'Branch `{branch_name}` meets the criteria for deletion')
                deletable_branches.append(branch_name)

            # Request the next page of branches
            current_page += 1
            url = self.get_paginated_branches_url(page=current_page)

            response = requests.get(url=url, headers=headers)
            if response.status_code != 200:
                raise RuntimeError(
                    f'Failed to make request to {url}. {response} {response.json()}'
                )

            branches = response.json()

        return deletable_branches
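
Putting it together, a caller might drive the whole clean-up like this (BranchManager and its constructor arguments are assumptions inferred from the methods shown, not the project's actual API):

# Hypothetical instantiation; the class name and constructor are assumptions
manager = BranchManager(github_repo='octocat/hello-world')
stale = manager.get_deletable_branches(last_commit_age_days=90,
                                       ignore_branches=['develop', 'staging'])
print('Branches eligible for deletion:', stale)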