def get_num_mocs_for_set(self, s):
    """Return the number of alternate builds (MOCs) Rebrickable lists for a set.

    :param s: a set record (dict) that contains a 'set_id' key
    :return: the count of alternate builds decoded from the API's JSON response
    """
    set_id = s['set_id']
    # A plain dict is sufficient here: the original wrapped a dict literal in
    # collections.OrderedDict, which never preserved insertion order anyway
    # (the literal is evaluated to a regular dict first on pre-3.7 Pythons).
    result = do_http_get('http://rebrickable.com/api/get_alt_builds',
                         params={'key': self.api_key,
                                 'set_id': set_id,
                                 'format': 'json'})
    return len(json.loads(result))
def get_all_sets(self, min_year, max_year):
    """Return all Rebrickable sets released in the given year range.

    (The original docstring was copy-pasted from a part-count function and
    described the wrong behavior; replaced to match what the code does.)

    :param min_year: earliest release year to include
    :param max_year: latest release year to include
    :return: list of set records from the API's JSON 'results' field
    """
    result = do_http_get('http://rebrickable.com/api/search',
                         params={'key': self.api_key,
                                 'type': 'S',
                                 'format': 'json',
                                 'min_year': min_year,
                                 'max_year': max_year})
    # list(...) replaces the redundant identity comprehension [x for x in ...].
    return list(json.loads(result)['results'])
def scrape_page(page):
    """Scrape one page of activities and cache it as a pretty-printed JSON file.

    :param page: page number to fetch
    :return: True if the page was scraped (or already cached on disk),
             False if the response contained no activities (end of data)
    """
    print_('Scraping page {0}'.format(page))
    # Skip pages that were already fetched and written to disk.
    if os.path.exists(getfilepath(page)):
        print_("Scraping page {0}...already done".format(page))
        return True
    params = PARAMS.copy()
    params['pg'] = page
    # Use print_ consistently; the original had one stray bare print() here.
    # (print_ accepts multiple args — see the except branch below.)
    print_('URL=', URL)
    output = http.do_http_get(URL, params=params)
    try:
        # 'data' (not 'json_str') — json.loads returns a parsed object.
        data = json.loads(output)
    except ValueError:
        # Dump the raw response so a malformed payload can be diagnosed.
        print_('output=', output)
        raise
    if not data['activities']:
        print_("Scraping page {0}...no activities".format(page))
        return False
    path = getfilepath(page)
    with open(path, 'w') as fp:
        json.dump(data, fp, indent=3, sort_keys=True)
    print_("Scraping page {0}...done".format(page))
    return True
def test_it_do_http(self):
    """Smoke test: a plain GET against a live URL must complete without raising.

    NOTE(review): there is no assertion on the response — this only checks
    that the call does not throw. The original bound the result to an unused
    local ('ret'); that dead assignment is removed.
    """
    url = 'http://www.google.com'
    http.do_http_get(url)
def test_it_do_http_get(self):
    """Smoke test: a GET with query parameters must complete without raising."""
    http.do_http_get('http://www.cnn.com/search', params={'q': 'aabbcc'})