def fetch_test_data(data_name):
    """Build a mock tweet from the named entry in the shared test JSON file.

    Args:
        data_name: Key into the test JSON file; the value is either a single
            status dict or a list of status dicts.

    Returns:
        generate_mock_tweet() wrapping a single Status or a list of Status
        objects, mirroring the shape of the JSON entry.
    """
    json_file = fetch_test_data_file(file=TEST_JSON_FILE)
    # Look the entry up once instead of three separate .get() calls.
    entry = json_file.get(data_name)
    # isinstance() is the idiomatic type check (and handles list subclasses).
    if isinstance(entry, list):
        return generate_mock_tweet(
            raw_status=[Status(**s) for s in entry])
    return generate_mock_tweet(raw_status=Status(**entry))
def NewFromJsonDict(cls, data, **kwargs):
    """Create a new instance based on a JSON dict.

    Args:
        data: A JSON dict, as converted from the JSON in the twitter API

    Returns:
        A twitter.Status instance
    """
    # Streaming-API "extended" tweets nest the full payload one level down;
    # flatten it into the top-level dict before parsing anything else.
    if 'extended_tweet' in data:
        for key, value in data['extended_tweet'].items():
            data[key] = value

    user = User.NewFromJsonDict(data['user']) if 'user' in data else None
    retweeted_status = (Status.NewFromJsonDict(data['retweeted_status'])
                        if 'retweeted_status' in data else None)
    current_user_retweet = (data['current_user_retweet']['id']
                            if 'current_user_retweet' in data else None)
    quoted_status = (Status.NewFromJsonDict(data.get('quoted_status'))
                     if 'quoted_status' in data else None)

    urls = None
    user_mentions = None
    hashtags = None
    media = None
    if 'entities' in data:
        entities = data['entities']
        if 'urls' in entities:
            urls = [Url.NewFromJsonDict(u) for u in entities['urls']]
        if 'user_mentions' in entities:
            user_mentions = [User.NewFromJsonDict(u)
                             for u in entities['user_mentions']]
        if 'hashtags' in entities:
            hashtags = [Hashtag.NewFromJsonDict(h)
                        for h in entities['hashtags']]
        if 'media' in entities:
            media = [Media.NewFromJsonDict(m) for m in entities['media']]
    # The newer extended_entities media list takes precedence when present.
    if 'extended_entities' in data:
        if 'media' in data['extended_entities']:
            media = [Media.NewFromJsonDict(m)
                     for m in data['extended_entities']['media']]

    return super(cls, cls).NewFromJsonDict(
        data=data,
        current_user_retweet=current_user_retweet,
        hashtags=hashtags,
        media=media,
        quoted_status=quoted_status,
        retweeted_status=retweeted_status,
        urls=urls,
        user=user,
        user_mentions=user_mentions)
def setUpClass(cls):
    """Load the sample timeline once and build the statistics helper."""
    with open("sample_test_data.json", "r") as fh:
        raw_statuses = json.load(fh)
    cls.timeline = [Status.NewFromJsonDict(raw) for raw in raw_statuses]
    cls.stats = UserTimelineStatistics()
def NewFromJsonDict(cls, data, **kwargs):
    """Create a new instance based on a JSON dict.

    Args:
        data: A JSON dict, as converted from the JSON in the twitter API.

    Returns:
        A new instance with 'expanded_url' / 'description_expanded' filled
        in from the entities payload and the embedded status attached.
    """
    from twitter import Status

    # BUG FIX: the original returned early inside the 'entities' branch,
    # so a 'status' present alongside 'entities' was silently dropped.
    # Resolve the embedded status first and pass it on every path.
    status = None
    if data.get('status', None):
        status = Status.NewFromJsonDict(data.get('status'))

    if 'entities' in data:
        if 'url' in data['entities']:
            urls = [Url.NewFromJsonDict(u)
                    for u in data['entities']['url']['urls']]
            # Guard against an empty list: the old `urls != None` check was
            # always true and urls[0] could raise IndexError.
            if urls:
                data['expanded_url'] = urls[0].expanded_url
        description = data.get('description')
        if 'description' in data['entities']:
            urls = [Url.NewFromJsonDict(u)
                    for u in data['entities']['description']['urls']]
            for url in urls:
                description = description.replace(url.url, url.expanded_url)
        # Will fill expanded with default even if no link exists.
        data['description_expanded'] = description

    if status is not None:
        return super(cls, cls).NewFromJsonDict(data=data, status=status)
    return super(cls, cls).NewFromJsonDict(data=data)
def new_tweet_from_json(self, data):
    """Converts a json string to a tweet instance.

    Args:
        data: json string containing twitter data

    Returns:
        twitter.models.status instance
    """
    tweet = Status.NewFromJsonDict(data)
    # Replace each entity attribute with parsed model objects, defaulting
    # to an empty list when the key is absent from the payload.
    tweet.urls = ([Url.NewFromJsonDict(u) for u in data['urls']]
                  if 'urls' in data else [])
    tweet.user_mentions = ([User.NewFromJsonDict(u)
                            for u in data['user_mentions']]
                           if 'user_mentions' in data else [])
    tweet.hashtags = ([Hashtag.NewFromJsonDict(h) for h in data['hashtags']]
                      if 'hashtags' in data else [])
    return tweet
def NewFromJsonDict(cls, data, **kwargs):
    """Create a new instance from a JSON dict, attaching any embedded status."""
    from twitter import Status

    embedded = data.get('status', None)
    if not embedded:
        return super(cls, cls).NewFromJsonDict(data=data)
    return super(cls, cls).NewFromJsonDict(
        data=data, status=Status.NewFromJsonDict(embedded))
def setUp(self):
    """Create a fixed Status fixture and a fresh Twitter client."""
    fixture_text = ('"Select all" and archive your Gmail inbox. '
                    ' The page loads so much faster!')
    self.status = Status(
        id=4212713,
        created_at='Fri Jan 26 17:28:19 +0000 2007',
        text=fixture_text,
    )
    self.twitter = Twitter()
def convert_dicts_in_status_to_obj(status: Status) -> Status:
    """Update each attribute of status with Twitter object"""
    # getattr() is the idiomatic spelling of the __getattribute__ lookups;
    # attributes are converted in the same order as before.
    status.urls = [Url(**url) for url in getattr(status, "urls")]
    status.user = User(**getattr(status, "user"))
    status.user_mentions = [
        User(**mention) for mention in getattr(status, "user_mentions")
    ]
    quoted = getattr(status, "quoted_status")
    # Quoted statuses nest the same dict shapes, so recurse; absent/empty
    # quoted payloads become None exactly as in the original.
    status.quoted_status = (
        convert_dicts_in_status_to_obj(status=Status(**quoted))
        if quoted else None)
    return status
def load(self):
    """Restore followings, tweets and the fetch timestamp from the cache file.

    Returns:
        True when the file existed and held a non-None fetch timestamp,
        False otherwise.
    """
    if not exists(self.filename):
        return False
    with open(self.filename, 'r') as fh:
        cached = json.load(fh)
    self.followings = [User.NewFromJsonDict(item)
                       for item in cached['followings']]
    self.tweets = [Status.NewFromJsonDict(item) for item in cached['tweets']]
    self.fetch_timestamp = cached['fetch_timestamp']
    return self.fetch_timestamp is not None
def _patched_post_update(self, message, media=None, longitude=None, latitude=None):
    """Test stub for posting an update: echo the message back as a Status."""
    fake_status = Status(
        id=123,
        user="******",
        created_at=datetime(2019, 4, 20),
        text=message,
    )
    return fake_status
def single_tweet(request):
    """Parametrized fixture yielding one Status and its expected analysis."""
    spec = request.param
    tweet = Status(
        full_text=spec["full_text"],
        created_at=spec["created_at"],
    )
    expected = {
        "word_counts": Counter(spec["counts"]),
        "group": datetime.date.fromisoformat(spec["group"]),
    }
    return tweet, expected
def get_search_side_effect(*args, **kwargs):
    """Mock side effect for a search call.

    Returns:
        A list of twitter.Status objects parsed from the canned
        test_data/get_search.json fixture.
    """
    from twitter import Status
    current_directory = os.path.dirname(os.path.realpath(__file__))
    data_file = os.path.join(current_directory, 'test_data', 'get_search.json')
    # A context manager guarantees the handle is closed even if JSON
    # parsing raises (the original leaked the file on parse errors).
    with open(data_file) as tweet_data:
        data = json.load(tweet_data)
    return [Status.NewFromJsonDict(x) for x in data.get('statuses')]
def test_average_favourites_with_errors(self):
    # Checks the behaviour of the method above when given invalid data.
    with open("test_average_favorites.json", "r") as fh:
        raw = json.load(fh)
    bad_statuses = [Status.NewFromJsonDict(item) for item in raw]
    self.assertRaises(TypeError, self.stats.average_favourites, bad_statuses)
def NewFromJsonDict(data):
    """Create a new instance based on a JSON dict.

    Args:
        data: A JSON dict, as converted from the JSON in the twitter API

    Returns:
        A twitter.User instance
    """
    if 'status' in data:
        from twitter import Status
        # Have to do the import here to prevent cyclic imports
        # in the __init__.py file
        status = Status.NewFromJsonDict(data['status'])
    else:
        status = None

    # Fields copied verbatim from the payload (missing keys become None).
    plain_fields = (
        'id', 'name', 'screen_name', 'location', 'description',
        'statuses_count', 'followers_count', 'favourites_count',
        'default_profile', 'default_profile_image', 'friends_count',
        'profile_background_tile', 'profile_background_image_url',
        'profile_banner_url', 'profile_sidebar_fill_color',
        'profile_background_color', 'profile_link_color',
        'profile_text_color', 'protected', 'utc_offset', 'time_zone',
        'url', 'geo_enabled', 'verified', 'lang', 'notifications',
        'contributors_enabled', 'created_at', 'listed_count',
    )
    kwargs = {field: data.get(field, None) for field in plain_fields}
    # Prefer the https avatar URL, falling back to the plain one.
    kwargs['profile_image_url'] = data.get(
        'profile_image_url_https', data.get('profile_image_url', None))
    kwargs['status'] = status
    return User(**kwargs)
def grouped_tweets():
    """Build Status fixtures plus per-day expected word counts from tweet_data."""
    tweets = [
        Status(full_text=spec["full_text"], created_at=spec["created_at"])
        for spec in tweet_data["tweets"]
    ]
    # The fixture keys days as ISO strings; expose them as date objects.
    expected_word_counts = {
        datetime.date.fromisoformat(day): counts
        for day, counts in tweet_data["counts"].items()
    }
    return tweets, expected_word_counts
def side_effect(*args, **kwargs):
    """Mock search side effect returning canned statuses.

    Returns an empty list for the special 'Non-existant' search term;
    otherwise parses test_data/get_search.json into Status objects.
    """
    from twitter import Status
    # A special case for testing a response with no results
    if 'term' in kwargs and kwargs.get('term') == 'Non-existant':
        return []
    current_directory = os.path.dirname(os.path.realpath(__file__))
    data_file = os.path.join(current_directory, 'test_data', 'get_search.json')
    # Context manager closes the handle even if JSON parsing fails
    # (the original leaked the file on exceptions).
    with open(data_file) as tweet_data:
        data = json.load(tweet_data)
    return [Status.NewFromJsonDict(x) for x in data.get('statuses', '')]
def PostRetweet(self, id):
    '''This code come from issue #130 on python-twitter tracker'''
    # Guard clauses first: must be authenticated, and id must parse as a
    # positive integer.
    if not self._oauth_consumer:
        raise TwitterError(
            "The twitter.Api instance must be authenticated.")
    try:
        numeric_id = int(id)
    except ValueError:
        raise TwitterError("'id' must be an integer")
    if numeric_id <= 0:
        raise TwitterError("'id' must be a positive number")
    url = 'http://api.twitter.com/1/statuses/retweet/%s.json' % id
    # POST with a dummy body to force the retweet request.
    json_data = self._FetchUrl(url, post_data={'dummy': None})
    data = json.loads(json_data)
    self._CheckForTwitterError(data)
    return Status.NewFromJsonDict(data)
def test_parse_tweets():
    """parse_tweet should map a python-twitter Status onto our tweet model."""
    media_json = {
        "id": 1,
        "filename": "a.jpg",
        "media_url": "https://domain/a.jpg",
        "type": "photo"
    }
    tweet_json = {
        "id": 1,
        "user": {"id": 1, "screen_name": "nobody"},
        "in_reply_to": "someone else",
        "in_reply_to_status_id": 0,
        "text": "Hello, World!",
        "entities": {"media": [media_json]}
    }
    # NewFromJsonDict is a factory classmethod; call it on the class
    # instead of instantiating a throwaway Status() first.
    status = Status.NewFromJsonDict(tweet_json)
    tweet = parse_tweet(status)
    assert tweet.id == 1, "The parsed tweet's ID did not match what was expected!"
    assert tweet.media[0].filename == "a.jpg", "The expected filename was not correct!"
def landslip_search_challenge(self, tweet):
    """Handle one raw tweet from Kafka: filter to the demo account and
    start the reply workflow.

    Args:
        tweet: raw UTF-8 encoded JSON bytes for a single tweet.
    """
    data = super().ParseAndCheckTwitter(tweet.decode('utf-8'))
    # Convert the raw dict into a twitter Status object. (The original
    # wrapped `data` in a one-element list comprehension and indexed [0];
    # a direct call is equivalent.)
    tweet_dict = Status.NewFromJsonDict(data)
    # Ignore anything that is not from the demo account.
    if tweet_dict.user.id != self.demo_acc_id:
        # BUG FIX: the extra argument needs a %s placeholder, otherwise
        # the logging module raises a formatting error when emitting.
        logger.error("Received not the demo account, the ID: %s",
                     tweet_dict.user.id)
        return
    # Clear redis state (used during testing).
    REDIS_CLIENT.delete(self.demo_acc_id)
    # Remember which tweet id to reply to.
    self.wait_reply_message_id = tweet_dict.id
    # Remember the screen name to reply to (currently unused).
    self.screen_name = tweet_dict.user.screen_name
    # Matches the demo account id (always true after the guard above).
    if tweet_dict.user.id == self.demo_acc_id:
        logger.info(
            "---------------------------------------- New tweet matched From Kafka Start -------------------------------------------"
        )
        logger.info(
            "Tweet ID: {0} , Screen name: {1} Message text : {2}".format(
                tweet_dict.user.id, tweet_dict.user.screen_name,
                tweet_dict.text))
        logger.info(
            "---------------------------------------- New tweet matched From Kafka End -------------------------------------------"
        )
        self._message_entry(tweet_dict)
        # Start the reply-monitoring thread.
        self.reply_monitor()
None, 'quoted_status': None, 'quoted_status_id': None, 'quoted_status_id_str': None, 'retweet_count': 12986, 'retweeted': True, 'retweeted_status': Status( ID=928709913953624066, ScreenName='SenJohnMcCain', Created='Thu Nov 09 19:44: 32 + 0000 2017', Text= 'The allegations against Roy Moore are deeply disturbing and disqualifying. He should immediately step aside and allow the people of Alabama to elect a candidate they can be proud of.' ), 'scopes': None, 'source': '<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>', 'text': None, 'truncated': False, 'tweet_mode': 'extended', 'urls': [], 'user':
def load_from_json(tweet_id, path):
    """Load messages from a json file"""
    with open(path, "r") as tweets_file:
        raw_tweets = json.load(tweets_file)
    cache_set(tweet_id, [Status.NewFromJsonDict(raw) for raw in raw_tweets])
def thaw_tweet(self, name):
    """Rehydrate a Status previously frozen to tests/twitter_json/<name>.json."""
    path = f'tests/twitter_json/{name}.json'
    with open(path, 'r') as fh:
        parsed = json.load(fh)
    return Status.NewFromJsonDict(parsed)
def cache_get(tweet_id):
    """Retrieve stored messages from a given tweet id"""
    stored = _cache.get(int(tweet_id), [])
    return [Status.NewFromJsonDict(json.loads(raw)) for raw in stored]