def main():
    # Smoke-test script (Python 2): fetch a release tarball from the blob
    # store three ways -- bare Resource, pooled Resource with redirects,
    # and a pooled fetch whose stream is unpacked to a local directory.
    url = 'https://blobs.yottabuild.org/targets/stk3700-0.0.0.tar.gz'
    print 'get:', url
    # 1) plain GET: no connection pool, no redirect following
    resource = Resource(url)
    response = resource.get()
    print 'response:', response
    print 'headers:', dict(response.headers.items())
    print 'body len:', len(response.body_string())
    url = 'https://blobs.yottabuild.org/targets/stk3700-0.0.0.tar.gz'
    headers = { }
    print 'get:', url
    # 2) pooled GET that follows redirects
    resource = Resource(url, pool=connection_pool.getPool(), follow_redirect=True)
    response = resource.get( headers = headers )
    print 'response:', response
    print 'headers:', dict(response.headers.items())
    print 'body len:', len(response.body_string())
    url = 'https://blobs.yottabuild.org/targets/stk3700-0.0.0.tar.gz'
    headers = { }
    print 'get:', url
    # 3) fetch again and stream-unpack the tarball into /tmp
    resource = Resource(url, pool=connection_pool.getPool(), follow_redirect=True)
    response = resource.get( headers = headers )
    access_common.unpackTarballStream(response.body_stream(), '/tmp/yttest/blobs/')
def system_ajax(request):
    """
    Utility ajax functions for polling couch and celerymon
    """
    # NOTE(review): `type` shadows the builtin; left unchanged in this doc-only pass.
    type = request.GET.get("api", None)
    task_limit = getattr(settings, "CELERYMON_TASK_LIMIT", 5)
    celerymon_url = getattr(settings, "CELERYMON_URL", "")
    db = XFormInstance.get_db()
    ret = {}
    if type == "_active_tasks":
        # bigcouch does not expose couch indexer tasks, so report none there
        tasks = [] if is_bigcouch() else filter(lambda x: x["type"] == "indexer", db.server.active_tasks())
        # for reference structure is:
        # tasks = [{'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 0,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023},
        #          {'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 70,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023}]
        return HttpResponse(json.dumps(tasks), mimetype="application/json")
    elif type == "_stats":
        return HttpResponse(json.dumps({}), mimetype="application/json")
    elif type == "_logs":
        pass
    if celerymon_url != "":
        cresource = Resource(celerymon_url, timeout=3)
        if type == "celerymon_poll":
            # inefficient way to just get everything in one fell swoop
            # first, get all task types:
            ret = []
            try:
                t = cresource.get("api/task/name/").body_string()
                task_names = json.loads(t)
            except Exception, ex:  # Python 2 syntax; celerymon outage -> empty list
                task_names = []
                t = {}
                logging.error("Error with getting celerymon: %s" % ex)
            for tname in task_names:
                # one extra request per task name, limited to task_limit entries
                taskinfo_raw = json.loads(
                    cresource.get("api/task/name/%s" % (tname),
                                  params_dict={"limit": task_limit}).body_string()
                )
                for traw in taskinfo_raw:
                    # it's an array of arrays - looping through [<id>, {task_info_dict}]
                    tinfo = traw[1]
                    # shorten dotted task names to their last two components
                    tinfo["name"] = ".".join(tinfo["name"].split(".")[-2:])
                    ret.append(tinfo)
            # NOTE(review): assumes every task dict has a 'succeeded' key -- confirm
            ret = sorted(ret, key=lambda x: x["succeeded"], reverse=True)
            return HttpResponse(json.dumps(ret), mimetype="application/json")
def system_ajax(request):
    """
    Utility ajax functions for polling couch and celerymon
    """
    # NOTE(review): `type` shadows the builtin; left unchanged in this doc-only pass.
    type = request.GET.get('api', None)
    task_limit = getattr(settings, 'CELERYMON_TASK_LIMIT', 5)
    celerymon_url = getattr(settings, 'CELERYMON_URL', '')
    db = XFormInstance.get_db()
    ret = {}
    if type == "_active_tasks":
        # bigcouch does not expose couch indexer tasks, so report none there
        tasks = [] if is_bigcouch() else filter(lambda x: x['type'] == "indexer", db.server.active_tasks())
        #for reference structure is:
        # tasks = [{'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 0,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023},
        #          {'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 70,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023}]
        return HttpResponse(json.dumps(tasks), mimetype='application/json')
    elif type == "_stats":
        return HttpResponse(json.dumps({}), mimetype = 'application/json')
    elif type == "_logs":
        pass
    if celerymon_url != '':
        cresource = Resource(celerymon_url, timeout=3)
        if type == "celerymon_poll":
            #inefficient way to just get everything in one fell swoop
            #first, get all task types:
            ret = []
            try:
                t = cresource.get("api/task/name/").body_string()
                task_names = json.loads(t)
            except Exception, ex:  # Python 2 syntax; celerymon outage -> empty list
                task_names = []
                t = {}
                logging.error("Error with getting celerymon: %s" % ex)
            for tname in task_names:
                # one extra request per task name, limited to task_limit entries
                taskinfo_raw = json.loads(cresource.get('api/task/name/%s' % (tname),
                                                        params_dict={'limit': task_limit}).body_string())
                for traw in taskinfo_raw:
                    # it's an array of arrays - looping through [<id>, {task_info_dict}]
                    tinfo = traw[1]
                    # shorten dotted task names to their last two components
                    tinfo['name'] = '.'.join(tinfo['name'].split('.')[-2:])
                    ret.append(tinfo)
            # NOTE(review): assumes every task dict has a 'succeeded' key -- confirm
            ret = sorted(ret, key=lambda x: x['succeeded'], reverse=True)
            return HttpResponse(json.dumps(ret), mimetype = 'application/json')
def get_qr_queue(host=celeryconfig.SERVER_HOST):
    # Poll the zebra_queue REST endpoint for pending print jobs, mark each
    # job fulfilled on the server, then forward its ZPL to the destination
    # printer over raw TCP.
    if not is_bootstrapped:
        # NOTE(review): module-level flag, presumably set by bootstrap() -- confirm
        bootstrap()
    res = Resource(host, manager=manager)
    auth_params = {'username':celeryconfig.ZPRINTER_USERNAME, 'api_key': celeryconfig.ZPRINTER_API_KEY}
    r = res.get('/api/zebra_queue/', params_dict=auth_params)
    # NOTE(review): local `json` shadows any imported json module in this scope
    json = simplejson.loads(r.body_string())
    if len(printer_dict.keys()) == 0:
        # lazily populate the printer lookup cache on first use
        get_printers()
    if len(json['objects']) > 0:
        for instance in json['objects']:
            uri = instance['resource_uri']
            zpl_code= instance['zpl_code']
            printer_uri = instance['destination_printer']
            printer_ip = printer_dict[printer_uri]['ip_address']
            printer_port = printer_dict[printer_uri]['port']
            # record fulfilment server-side before physically printing
            instance['fulfilled_date'] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000")
            res.put(uri, simplejson.dumps(instance), headers={'Content-Type': 'application/json'}, params_dict=auth_params)
            do_send(printer_ip, printer_port, zpl_code, recv=False)
    else:
        logging.debug("no jobs")
def count(self):
    """
    Return the number of records as an integer.

    The result is neither cached nor read from a cache; any caching is
    the server's responsibility.
    """
    cloned = self._clone()
    # An instance is required: get_resource_url_count cannot be a
    # staticmethod without forcing get_resource_url_list on every model.
    model_instance = cloned.model()
    remote = Resource(model_instance.get_resource_url_count(),
                      filters=ROA_FILTERS, **ROA_SSL_ARGS)
    try:
        query_params = cloned.query.parameters
        logger.debug(u"""Counting : "%s" through %s with parameters "%s" """ % (
            cloned.model.__name__, remote.uri, force_unicode(query_params)))
        response = remote.get(headers=self._get_http_headers(), **query_params)
    except Exception as e:
        raise ROAException(e)
    body = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)
    parsed = self.model.get_parser().parse(StringIO(body))
    return self.model.count_response(parsed)
def count(self):
    """
    Return the number of records as an integer.

    Nothing is cached on the client; caching, if any, happens server-side.
    """
    cloned = self._clone()
    # Instantiate because get_resource_url_count is an instance method, so
    # models need not also expose get_resource_url_list as a staticmethod.
    target = cloned.model()
    remote = Resource(target.get_resource_url_count(),
                      filters=ROA_FILTERS, **ROA_SSL_ARGS)
    try:
        query_params = cloned.query.parameters
        logger.debug(u"""Counting : "%s" through %s with parameters "%s" """ % (
            cloned.model.__name__, remote.uri, force_unicode(query_params)))
        reply = remote.get(headers=self._get_http_headers(), **query_params)
    except Exception as e:
        raise ROAException(e)
    payload = force_unicode(reply.body_string()).encode(DEFAULT_CHARSET)
    data = self.model.get_parser().parse(StringIO(payload))
    return self.model.count_response(data)
def count(self):
    """
    Return the number of records as an integer.

    Not cached; a body that does not parse as an integer counts as zero.
    """
    cloned = self._clone()
    # Instantiate because get_resource_url_count is an instance method, so
    # models need not also expose get_resource_url_list as a staticmethod.
    model_instance = cloned.model()
    remote = Resource(model_instance.get_resource_url_count(), filters=ROA_FILTERS)
    try:
        query_params = cloned.query.parameters
        logger.debug(u"""Counting : "%s" through %s with parameters "%s" """ % (
            cloned.model.__name__, remote.uri, force_unicode(query_params)))
        response = remote.get(headers=ROA_HEADERS, **query_params)
    except Exception as e:
        raise ROAException(e)
    try:
        return int(response.body_string())
    except ValueError:
        # deliberate best-effort: a non-numeric body yields a count of 0
        return 0
def get_pull_request_by_label(self, user, repo, label):
    # Fetch the repo's open pull requests and keep those whose head label
    # matches; callers expect at most one.
    endpoint = Resource("https://api.github.com/repos/%s/%s/pulls" % (user, repo))
    all_pulls = json.loads(endpoint.get(headers=self.headers).body_string())
    matching = filter(lambda p: p['head']['label'] == label, all_pulls)
    return matching
def auth_get_repo(user_password):
    # Clone every repository visible to a hard-coded GitHub OAuth token.
    pool = ConnectionPool(factory=Connection)
    serverurl = "https://api.github.com"
    '''
    #print 'Enter your username:'******'+')[0]
    password = user_password.split('+')[1]
    # Add your username and password here, or prompt for them
    auth=BasicAuth(username, password)
    # Use your basic auth to request a token
    # This is just an example from http://developer.github.com/v3/
    authreqdata = { "scopes": [ "public_repo" ], "note": "admin script" }
    resource = Resource('https://api.github.com/authorizations', pool=pool, filters=[auth])
    response = resource.post(headers={ "Content-Type": "application/json" }, payload=json.dumps(authreqdata))
    token = json.loads(response.body_string())['token']
    '''
    """
    Once you have a token, you can pass that in the Authorization header
    You can store this in a cache and throw away the user/password
    This is just an example query. See http://developer.github.com/v3/ for more about the url structure
    """
    # SECURITY NOTE(review): OAuth token checked into source control -- this
    # credential should be revoked and loaded from configuration instead.
    token = '94038d59a46c5ea1aa4f11626a83cde3e8794668'
    resource = Resource('https://api.github.com/user/repos', pool=pool)
    headers = {'Content-Type': 'application/json'}
    headers['Authorization'] = 'token %s' % token
    response = resource.get(headers=headers)
    repos = json.loads(response.body_string())
    # clone everything the token's user can access
    for each in repos:
        git("clone", each['clone_url'])
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    remote web service.
    """
    resource = Resource(self.model.get_resource_url_list(), filters=ROA_FILTERS)
    try:
        parameters = self.query.parameters
        logger.debug(u"""Requesting: "%s" through %s with parameters "%s" """ % (
            self.model.__name__, resource.uri, force_unicode(parameters)))
        response = resource.get(headers=ROA_HEADERS, **parameters)
    except ResourceNotFound:
        # a missing remote collection simply yields nothing
        return
    except Exception as e:
        raise ROAException(e)
    response = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)
    stream = StringIO(response)
    data = self.model.get_parser().parse(stream)
    serializer = self.model.get_serializer(data=data)
    if not serializer.is_valid():
        raise ROAException('Invalid deserialization')
    for obj in serializer.object:
        # BUG FIX: the original rebound `obj = res.object` from an undefined
        # name `res`, raising NameError on the first result; yield the
        # deserialized object directly instead.
        yield obj
def get_pull_request_by_branch(self, user, repo, branch):
    # List the repo's pull requests and keep those whose head ref matches
    # the branch; callers assume at most one.
    pulls_url = "https://api.github.com/repos/%s/%s/pulls" % (user, repo)
    response = Resource(pulls_url).get(headers=self.headers)
    open_pulls = json.loads(response.body_string())
    return filter(lambda p: p['head']['ref'] == branch, open_pulls)
class Query(object):
    """Thin wrapper around a restkit Resource for talking to the GitHub API.

    Holds default params/payload/headers and exposes one helper per HTTP verb;
    every response body is run through :meth:`parse_response`.
    """

    def __init__(self, url=GITHUB_URL, params=None, payload=None, headers=None,
                 filters=None, access_token=None):
        self.url = url
        self.params = params or dict()
        self.payload = payload or dict()
        self.headers = headers or {'Content-Type': 'application/json'}
        filters = filters or list()
        self.resource = Resource(
            url,
            pool=ConnectionPool(factory=Connection),
            filters=filters,
        )
        if access_token is not None:
            # token auth is carried as a query parameter on every request
            self.params["access_token"] = access_token

    def concat_path(self, *args):
        # Append each path segment to the resource URI, in order.
        for path in args:
            self.resource.update_uri(path)

    def do_GET(self, path=None, params=None):
        params = params or self.params
        response = self.resource.get(path, self.headers, params)
        return self.parse_response(response.body_string())

    def do_POST(self, path=None, payload=None, params=None):
        payload = payload or self.payload
        params = params or self.params
        response = self.resource.post(path, json.dumps(payload), self.headers, params)
        return self.parse_response(response.body_string())

    def do_DELETE(self, path=None, params=None):
        params = params or self.params
        response = self.resource.delete(path, self.headers, params)
        return self.parse_response(response.body_string())

    def do_PATCH(self, path=None, payload=None, params=None):
        payload = payload or self.payload
        params = params or self.params
        response = self.resource.request("PATCH", path, json.dumps(payload), self.headers, params)
        return self.parse_response(response.body_string())

    def parse_response(self, response):
        """Decode a JSON body; return the raw text when it is not JSON."""
        try:
            return json.loads(response)
        except ValueError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; json.loads signals malformed
            # input with ValueError (JSONDecodeError subclasses it).
            return response

    def __repr__(self):
        return "uri:<{0}>".format(self.resource.uri)

    def __str__(self):
        return self.resource.uri
def getAuthData():
    ''' Poll the registry to get the result of a completed authentication
        (which, depending on the authentication the user chose or was
        directed to, will include a github or other access token)
    '''
    url = '%s/tokens' % ( Registry_Base_URL )
    request_headers = {}
    auth_filter = _registryAuthFilter()
    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth_filter])
    try:
        logger.debug('poll for tokens...')
        response = resource.get(headers=request_headers)
    except (restkit_errors.Unauthorized,
            restkit_errors.ResourceNotFound,
            restkit_errors.RequestFailed) as e:
        # any of these just means "no tokens available yet"
        logger.debug(str(e))
        return None
    body = response.body_string()
    logger.debug('auth data response: %s' % body)
    tokens = {}
    for token in ordered_json.loads(body):
        # only the first github token is of interest
        if token['provider'] == 'github':
            tokens['github'] = token['accessToken']
            break
    logger.debug('parsed auth tokens %s' % tokens)
    return tokens
def _getTarball(url, directory, sha256):
    # Download a package tarball from the registry and unpack it into
    # `directory`, verifying the stream against the given sha256 hash
    # (only a warning is issued when no hash is available).
    auth_filter = _registryAuthFilter()
    logger.debug('registry: get: %s' % url)
    if not sha256:
        logger.warn('tarball %s has no hash to check' % url)
    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth_filter])
    response = resource.get()
    # restkit does not reliably follow this redirect itself, so a 302 is
    # handled manually by re-requesting the Location target.
    if response.status_int == 302 and 'Location' in response.headers:
        redirect_url = response.headers['Location']
        logger.debug('registry: redirect to: %s' % redirect_url)
        response = Resource(redirect_url, pool=connection_pool.getPool()).get()
    return access_common.unpackTarballStream(response.body_stream(), directory, ('sha256', sha256))
def check_celery_health():
    # Build an HTML summary of celery worker health from the flower API.
    # NOTE(review): this block appears truncated -- `ret` is initialised but
    # never populated and nothing is returned; confirm against the full file.
    ret = {}
    celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
    worker_status = ""
    if celery_monitoring:
        cresource = Resource(celery_monitoring, timeout=3)
        all_workers = {}
        try:
            t = cresource.get("api/workers").body_string()
            all_workers = json.loads(t)
        except Exception, ex:  # Python 2 syntax; flower outage -> empty worker list
            pass
        worker_ok = '<span class="label label-success">OK</span>'
        worker_bad = '<span class="label label-important">Down</span>'
        tasks_ok = 'label-success'
        tasks_full = 'label-warning'
        worker_info = []
        for hostname, w in all_workers.items():
            status_html = mark_safe(worker_ok if w['status'] else worker_bad)
            # a worker running as many tasks as its concurrency is saturated
            tasks_class = tasks_full if w['running_tasks'] == w[
                'concurrency'] else tasks_ok
            tasks_html = mark_safe(
                '<span class="label %s">%d / %d</span> :: %d' % (tasks_class,
                w['running_tasks'], w['concurrency'], w['completed_tasks']))
            worker_info.append(' '.join([hostname, status_html, tasks_html]))
        worker_status = '<br>'.join(worker_info)
def check_heartbeat():
    # Compare flower's expected worker set against a live celery ping; fall
    # through to the plain heartbeat check when flower is not configured or
    # reports no problems.
    flower_url = getattr(settings, 'CELERY_FLOWER_URL', None)
    if flower_url:
        flower = Resource(flower_url, timeout=3)
        raw = flower.get("api/workers", params_dict={'status': True}).body_string()
        all_workers = json.loads(raw)
        expected_running, expected_stopped = parse_celery_workers(all_workers)
        celery_app = Celery()
        celery_app.config_from_object(settings)
        pings = parse_celery_pings(celery_app.control.ping(timeout=10))
        problems = []
        # workers flower expects to be up must answer the ping...
        for hostname in expected_running:
            if hostname not in pings or not pings[hostname]:
                problems.append('* {} celery worker down'.format(hostname))
        # ...and workers expected to be stopped must not answer it
        for hostname in expected_stopped:
            if hostname in pings:
                problems.append(
                    '* {} celery worker is running when we expect it to be stopped.'.format(hostname))
        if problems:
            return ServiceStatus(False, '\n'.join(problems))
    is_alive = heartbeat.is_alive()
    return ServiceStatus(is_alive, "OK" if is_alive else "DOWN")
def auth_get_repo(user_password):
    # Clone every repository visible to a hard-coded GitHub OAuth token.
    pool = ConnectionPool(factory=Connection)
    serverurl="https://api.github.com"
    '''
    #print 'Enter your username:'******'+')[0]
    password = user_password.split('+')[1]
    # Add your username and password here, or prompt for them
    auth=BasicAuth(username, password)
    # Use your basic auth to request a token
    # This is just an example from http://developer.github.com/v3/
    authreqdata = { "scopes": [ "public_repo" ], "note": "admin script" }
    resource = Resource('https://api.github.com/authorizations', pool=pool, filters=[auth])
    response = resource.post(headers={ "Content-Type": "application/json" }, payload=json.dumps(authreqdata))
    token = json.loads(response.body_string())['token']
    '''
    """
    Once you have a token, you can pass that in the Authorization header
    You can store this in a cache and throw away the user/password
    This is just an example query. See http://developer.github.com/v3/ for more about the url structure
    """
    # SECURITY NOTE(review): OAuth token checked into source control -- this
    # credential should be revoked and loaded from configuration instead.
    token = '94038d59a46c5ea1aa4f11626a83cde3e8794668'
    resource = Resource('https://api.github.com/user/repos', pool=pool)
    headers = {'Content-Type' : 'application/json' }
    headers['Authorization'] = 'token %s' % token
    response = resource.get(headers = headers)
    repos = json.loads(response.body_string())
    # clone everything the token's user can access
    for each in repos:
        git("clone", each['clone_url'])
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    remote web service.
    """
    list_url = self.model.get_resource_url_list(**self.query.filters)
    resource = Resource(list_url, filters=ROA_FILTERS, **ROA_SSL_ARGS)
    try:
        parameters = self.query.parameters
        logger.debug(u"""Requesting: "%s" through %s with parameters "%s" """ % (
            self.model.__name__, resource.uri, force_unicode(parameters)))
        response = resource.get(headers=self._get_http_headers(), **parameters)
    except ResourceNotFound:
        # a missing remote collection is treated as empty
        return
    except Exception as e:
        raise ROAException(e)
    body = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)
    # Deserializing objects:
    data = self.model.get_parser().parse(StringIO(body))
    # [] is the case of an empty, non-paginated result
    if data == []:
        return
    serializer = self.model.get_serializer(data=data)
    if not serializer.is_valid():
        raise ROAException(u'Invalid deserialization for {} model: {}'.format(self.model, serializer.errors))
    for obj in serializer.object:
        yield obj
def check_heartbeat():
    # Cross-check flower's view of the worker fleet against a live celery
    # ping before falling back to the basic heartbeat probe.
    flower_url = getattr(settings, "CELERY_FLOWER_URL", None)
    if flower_url:
        flower = Resource(flower_url, timeout=3)
        raw = flower.get("api/workers", params_dict={"status": True}).body_string()
        expected_running, expected_stopped = parse_celery_workers(json.loads(raw))
        app = Celery()
        app.config_from_object(settings)
        pings = parse_celery_pings(app.control.ping(timeout=10))
        bad_workers = []
        for hostname in expected_running:
            # an expected-up worker that did not answer (or answered falsy) is down
            if hostname not in pings or not pings[hostname]:
                bad_workers.append("* {} celery worker down".format(hostname))
        for hostname in expected_stopped:
            # an expected-down worker that answered should not be running
            if hostname in pings:
                bad_workers.append("* {} celery worker is running when we expect it to be stopped.".format(hostname))
        if bad_workers:
            return ServiceStatus(False, "\n".join(bad_workers))
    is_alive = heartbeat.is_alive()
    return ServiceStatus(is_alive, "OK" if is_alive else "DOWN")
def check_celery_health():
    # Build an HTML summary of celery worker health from the flower API.
    # NOTE(review): this block appears truncated -- `ret` is initialised but
    # never populated and nothing is returned; confirm against the full file.
    ret = {}
    celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
    worker_status = ""
    if celery_monitoring:
        cresource = Resource(celery_monitoring, timeout=3)
        all_workers = {}
        try:
            t = cresource.get("api/workers").body_string()
            all_workers = json.loads(t)
        except Exception, ex:  # Python 2 syntax; flower outage -> empty worker list
            pass
        worker_ok = '<span class="label label-success">OK</span>'
        worker_bad = '<span class="label label-important">Down</span>'
        tasks_ok = 'label-success'
        tasks_full = 'label-warning'
        worker_info = []
        for hostname, w in all_workers.items():
            status_html = mark_safe(worker_ok if w['status'] else worker_bad)
            # a worker running as many tasks as its concurrency is saturated
            tasks_class = tasks_full if w['running_tasks'] == w['concurrency'] else tasks_ok
            tasks_html = mark_safe('<span class="label %s">%d / %d</span> :: %d' % (tasks_class, w['running_tasks'], w['concurrency'], w['completed_tasks']))
            worker_info.append(' '.join([hostname, status_html, tasks_html]))
        worker_status = '<br>'.join(worker_info)
def _get_from_id_or_pk(self, id=None, pk=None, **kwargs):
    """
    Returns an object given an id or pk, request directly with the
    get_resource_url_detail method without filtering on ids (as
    Django's ORM do).
    """
    clone = self._clone()
    # Instantiation of clone.model is necessary because we can't set
    # a staticmethod for get_resource_url_detail and avoid to set it
    # for all model without relying on get_resource_url_list
    instance = clone.model()
    # the detail URL is derived from whichever identifier was supplied
    if pk is None:
        instance.id = id
    else:
        instance.pk = pk
    resource = Resource(instance.get_resource_url_detail(),
                        headers=ROA_HEADERS,
                        filters=ROA_FILTERS, **kwargs)
    try:
        parameters = clone.query.parameters
        logger.debug(
            u"""Retrieving : "%s" through %s with parameters "%s" """
            % (clone.model.__name__, resource.uri, force_unicode(parameters))
        )
        response = resource.get(**parameters)
    except Exception, e:  # Python 2 syntax
        raise ROAException(e)
    # NOTE(review): block appears truncated here -- `response` is unused in
    # the visible code; parsing presumably follows in the full file.
def system_ajax(request):
    """
    Utility ajax functions for polling couch and celerymon
    """
    # NOTE(review): `type` shadows the builtin; left unchanged in this doc-only pass.
    type = request.GET.get('api', None)
    task_limit = getattr(settings, 'CELERYMON_TASK_LIMIT', 12)
    celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
    db = XFormInstance.get_db()
    if type == "_active_tasks":
        # bigcouch does not expose couch indexer tasks, so report none there
        tasks = [] if is_bigcouch() else filter(
            lambda x: x['type'] == "indexer", db.server.active_tasks())
        #for reference structure is:
        # tasks = [{'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 0,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023},
        #          {'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 70,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023}]
        return json_response(tasks)
    elif type == "_stats":
        return json_response({})
    elif type == "_logs":
        pass
    elif type == 'pillowtop':
        return json_response(get_all_pillows_json())
    elif type == 'stale_pillows':
        es_index_status = [
            check_case_es_index(interval=3),
            check_xform_es_index(interval=3),
            check_reportcase_es_index(interval=3),
            check_reportxform_es_index(interval=3)
        ]
        return json_response(es_index_status)
    if celery_monitoring:
        cresource = Resource(celery_monitoring, timeout=3)
        if type == "flower_poll":
            ret = []
            try:
                t = cresource.get("api/tasks", params_dict={
                    'limit': task_limit
                }).body_string()
                all_tasks = json.loads(t)
            except Exception, ex:  # Python 2 syntax; flower outage -> empty task set
                all_tasks = {}
                logging.error("Error with getting from celery_flower: %s" % ex)
            for task_id, traw in all_tasks.items():
                # it's an array of arrays - looping through [<id>, {task_info_dict}]
                if 'name' in traw and traw['name']:
                    # shorten dotted task names to their last two components
                    traw['name'] = '.'.join(traw['name'].split('.')[-2:])
                else:
                    traw['name'] = None
                ret.append(traw)
            # NOTE(review): assumes every task dict has a 'succeeded' key -- confirm
            ret = sorted(ret, key=lambda x: x['succeeded'], reverse=True)
            return HttpResponse(json.dumps(ret), mimetype='application/json')
def setUp(self):
    # Authenticate once with basic auth, then keep a token-authenticated
    # resource around for the actual tests.
    basic = BasicAuth('john', 'teste')
    bootstrap = Resource(server, filters=[basic])
    token_response = bootstrap.get('/authenticate')
    payload = loads(token_response.body_string())
    self.auth = AuthToken(payload.get('token'))
    self.res = Resource(server, filters=[self.auth])
def getJSONResource(self, path, user, params_dict=None):
    # GET `path` authenticated as `user`, assert a 200 status, and return
    # the decoded JSON body.
    name = user.credentials[0]
    endpoint = Resource(self.baseurl, filters=[user])
    result = endpoint.get(path, None, params_dict=params_dict)
    self.assertEqual(200, result.status_int, "Wrong response code: " + name)
    return json.loads(result.body_string())
def get_epic_id(url, key, auth):
    # Look up the JIRA custom-field id that carries the "Epic Link" value;
    # returns None on a non-200 response or when the field is absent.
    issue_resource = Resource(url + ('/rest/api/latest/issue/%s?expand=names' % key), filters=[auth])
    response = issue_resource.get(headers={'Content-Type': 'application/json'})
    if response.status_int != 200:
        return None
    names = json.loads(response.body_string())['names']
    for field_id, field_name in names.items():
        if field_name == 'Epic Link':
            return field_id
def _getTarball(url, into_directory):
    '''unpack the specified tarball url into the specified directory'''
    resource = Resource(url, pool=connection_pool.getPool(), follow_redirect=True)
    # authenticate against github with the stored personal token
    github_token = settings.getProperty('github', 'authtoken')
    response = resource.get(
        headers={'Authorization': 'token ' + github_token},
    )
    logger.debug('getting file: %s', url)
    # TODO: there's an MD5 in the response headers, verify it
    access_common.unpackTarballStream(response.body_stream(), into_directory)
def _get(self, url): print "Running GET on URL:", url auth = BasicAuth(self.login, self.password) res = Resource(url, filters=[auth]) r = res.get() if r['Content-Type'].startswith('application/json'): return json.loads(r.body_string()) else: return r.body_string()
def testAuthentication(self):
    # An authenticated resource must succeed...
    r = self.res.get('/')
    self.assertEqual(200, r.status_int)
    # ...and an unauthenticated one must be rejected with Unauthorized.
    try:
        res = Resource(server)
        r = res.get('/')
        # FIX: was self.assertTrue(False) -- self.fail() states the intent;
        # the AssertionError it raises is not caught by the except below.
        self.fail("expected Unauthorized for unauthenticated request")
    except Unauthorized:
        # FIX: dropped Python-2-only `except Unauthorized, e` (the bound
        # exception was unused) and the no-op assertTrue(True).
        pass
def testAuthentication(self):
    # An authenticated resource must succeed...
    r = self.res.get('/')
    self.assertEqual(200, r.status_int)
    # ...and an unauthenticated one must be rejected with Unauthorized.
    try:
        res = Resource(server)
        r = res.get('/')
        # FIX: was self.assertTrue(False) -- self.fail() states the intent;
        # the AssertionError it raises is not caught by the except below.
        self.fail("expected Unauthorized for unauthenticated request")
    except Unauthorized:
        # FIX: dropped Python-2-only `except Unauthorized, e` (the bound
        # exception was unused) and the no-op assertTrue(True).
        pass
def system_ajax(request):
    """
    Utility ajax functions for polling couch and celerymon
    """
    # NOTE(review): `type` shadows the builtin; left unchanged in this doc-only pass.
    type = request.GET.get('api', None)
    task_limit = getattr(settings, 'CELERYMON_TASK_LIMIT', 12)
    celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
    db = XFormInstance.get_db()
    if type == "_active_tasks":
        # bigcouch does not expose couch indexer tasks, so report none there
        tasks = [] if is_bigcouch() else filter(lambda x: x['type'] == "indexer",
                                                db.server.active_tasks())
        #for reference structure is:
        # tasks = [{'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 0,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023},
        #          {'type': 'indexer', 'pid': 'foo', 'database': 'mock',
        #           'design_document': 'mockymock', 'progress': 70,
        #           'started_on': 1349906040.723517, 'updated_on': 1349905800.679458,
        #           'total_changes': 1023}]
        return json_response(tasks)
    elif type == "_stats":
        return json_response({})
    elif type == "_logs":
        pass
    elif type == 'pillowtop':
        return json_response(get_all_pillows_json())
    elif type == 'stale_pillows':
        es_index_status = [
            check_case_es_index(interval=3),
            check_xform_es_index(interval=3),
            check_reportcase_es_index(interval=3),
            check_reportxform_es_index(interval=3)
        ]
        return json_response(es_index_status)
    if celery_monitoring:
        cresource = Resource(celery_monitoring, timeout=3)
        if type == "flower_poll":
            ret = []
            try:
                t = cresource.get("api/tasks", params_dict={'limit': task_limit}).body_string()
                all_tasks = json.loads(t)
            except Exception, ex:  # Python 2 syntax; flower outage -> empty task set
                all_tasks = {}
                logging.error("Error with getting from celery_flower: %s" % ex)
            for task_id, traw in all_tasks.items():
                # it's an array of arrays - looping through [<id>, {task_info_dict}]
                if 'name' in traw and traw['name']:
                    # shorten dotted task names to their last two components
                    traw['name'] = '.'.join(traw['name'].split('.')[-2:])
                else:
                    traw['name'] = None
                ret.append(traw)
            # NOTE(review): assumes every task dict has a 'succeeded' key -- confirm
            ret = sorted(ret, key=lambda x: x['succeeded'], reverse=True)
            return HttpResponse(json.dumps(ret), mimetype = 'application/json')
def get_stats(celery_monitoring, status_only=False, refresh=False):
    """Fetch worker stats from flower's api/workers; return {} on any failure."""
    flower = Resource(celery_monitoring, timeout=3)
    query = {}
    if refresh:
        query['refresh'] = 'true'
    if status_only:
        query['status'] = 'true'
    try:
        body = flower.get("api/workers", params_dict=query).body_string()
        return json.loads(body)
    except Exception:
        # deliberate best-effort: monitoring must never take the caller down
        return {}
def get_stats(celery_monitoring, status_only=False, refresh=False):
    """Query flower's api/workers endpoint; any failure degrades to {}."""
    cresource = Resource(celery_monitoring, timeout=3)
    request_params = {'refresh': 'true'} if refresh else {}
    if status_only:
        request_params['status'] = 'true'
    try:
        raw = cresource.get("api/workers", params_dict=request_params).body_string()
        return json.loads(raw)
    except Exception:
        # deliberate best-effort: a flower outage yields empty stats
        return {}
def get_epic_id(url, key, auth):
    # Resolve which JIRA custom-field id carries the "Epic Link" value for
    # this issue; None when the request fails or the field is not present.
    api_path = '/rest/api/latest/issue/%s?expand=names' % key
    response = Resource(url + api_path, filters=[auth]).get(
        headers={'Content-Type': 'application/json'})
    if response.status_int == 200:
        field_names = json.loads(response.body_string())['names']
        for field_id, field_name in field_names.items():
            if field_name == 'Epic Link':
                return field_id
    else:
        return None
def testLogin():
    # Hit the registry's "who am I" endpoint using the stored auth filter.
    url = '%s/users/me' % ( Registry_Base_URL )
    auth = _registryAuthFilter()
    resource = Resource(url, pool=connection_pool.getPool(), filters=[auth])
    logger.debug('test login...')
    response = resource.get(headers={})
def getJSON(self, url): resource = Resource(url, filters=[auth]) response = resource.get(headers={'Content-Type': 'application/json'}) if response.status_int == 200: # Not all resources will return 200 on success. There are other success status codes. Like 204. We've read # the documentation for though and know what to expect here. versions = json.loads(response.body_string()) return versions else: print response.status_int # print response. return None
def setUp(self):
    # Exchange basic credentials for an auth token, then build the
    # token-filtered resource the tests will use.
    login = Resource(server, filters=[BasicAuth('john', 'teste')])
    reply = login.get('/authenticate')
    token_data = loads(reply.body_string())
    self.auth = AuthToken(token_data.get('token'))
    self.res = Resource(server, filters=[self.auth])
def getJSON(self, url): resource = Resource(url, filters=[auth]) response = resource.get(headers={'Content-Type': 'application/json'}) if response.status_int == 200: # Not all resources will return 200 on success. There are other success status codes. Like 204. We've read # the documentation for though and know what to expect here. versions = json.loads(response.body_string()) return versions else: print response.status_int # print response. return None
def test_custom_serializer(self):
    # Register a custom serializer, switch ROA_FORMAT to it, and verify that
    # objects round-trip through the remote server in that format.
    register_serializer('custom', 'examples.django_roa_client.serializers')
    initial_roa_format_setting = settings.ROA_FORMAT
    settings.ROA_FORMAT = 'custom'
    page = RemotePage.objects.create(title=u'A custom serialized page')
    self.assertEqual(repr(page), '<RemotePage: A custom serialized page (1)>')
    # raw GET against the server to inspect the serialized payload itself
    r = Resource('http://127.0.0.1:8081/django_roa_server/remotepage/', filters=ROA_FILTERS)
    response = r.get(**{'format': 'custom'})
    self.assertEqual(repr(response.body_string()), '\'<?xml version="1.0" encoding="utf-8"?>\\n<django-test version="1.0">\\n <object pk="1" model="django_roa_server.remotepage">\\n <field type="CharField" name="title">A custom serialized page</field>\\n </object>\\n</django-test>\'')
    self.assertEqual(len(RemotePage.objects.all()), 1)
    page = RemotePage.objects.get(id=page.id)
    self.assertEqual(repr(page), '<RemotePage: A custom serialized page (1)>')
    # restore the global setting so later tests are unaffected
    settings.ROA_FORMAT = initial_roa_format_setting
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    remote web service.
    """
    resource = Resource(self.model.get_resource_url_list(),
                        filters=ROA_FILTERS, **ROA_SSL_ARGS)
    try:
        parameters = self.query.parameters
        logger.debug(u"""Requesting: "%s" through %s with parameters "%s" """ % (
            self.model.__name__, resource.uri, force_unicode(parameters)))
        # Strip the "filter_" prefix off filter parameters before sending.
        # BUG FIX: iterate over a snapshot of the keys -- the original added
        # and deleted entries while iterating the live dict, which is
        # undefined behaviour (RuntimeError or skipped/repeated keys).
        for key in list(parameters):
            match_object = re.search(r'^filter_(\w+)', key)
            if match_object:
                parameters[match_object.group(1)] = parameters[key]
                del parameters[key]
        response = resource.get(headers=self._get_http_headers(), **parameters)
    except ResourceNotFound:
        # a missing remote collection simply yields nothing
        return
    except Exception as e:
        raise ROAException(e)
    response = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)
    # translate remote model names back to the local ones
    for local_name, remote_name in ROA_MODEL_NAME_MAPPING:
        response = response.replace(remote_name, local_name)
    # Deserializing objects:
    data = self.model.get_parser().parse(StringIO(response))
    # Check limit_start and limit_stop arguments for pagination and only
    # slice data if they are both numeric and there are results left to go.
    # We only perform this check on lists.
    limit_start = getattr(self.query, 'limit_start', None)
    limit_stop = getattr(self.query, 'limit_stop', None)
    if (isinstance(limit_start, int) and isinstance(limit_stop, int)
            and limit_stop - limit_start < len(data)
            and limit_stop <= len(data) and isinstance(data, list)):
        data = data[limit_start:limit_stop]
    # [] is the case of empty no-paginated result
    if data != []:
        serializer = self.model.get_serializer(data=data)
        if not serializer.is_valid():
            raise ROAException(u'Invalid deserialization for %s model: %s' % (self.model, serializer.errors))
        # validated_data drops the primary key; restore it from the raw payload
        pk_name = self.model._meta.pk.name
        for i, item in enumerate(serializer.validated_data):
            item[pk_name] = data[i][pk_name]
            yield self.model(**item)
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    remote web service.

    Yields model instances deserialized from the remote response.
    """
    resource = Resource(self.model.get_resource_url_list(),
                        filters=ROA_FILTERS, **ROA_SSL_ARGS)
    try:
        params = self.query.parameters
        logger.debug(
            u"""Requesting: "%s" through %s with parameters "%s" """
            % (self.model.__name__, resource.uri, force_unicode(params)))
        response = resource.get(headers=self._get_http_headers(), **params)
    except ResourceNotFound:
        return
    except Exception as e:
        raise ROAException(e)

    body = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)

    # Deserializing objects:
    data = self.model.get_parser().parse(StringIO(body))

    # Check limit_start and limit_stop arguments for pagination and only
    # slice data if they are both numeric and there are results left to go.
    # We only perform this check on lists.
    limit_start = getattr(self.query, 'limit_start', None)
    limit_stop = getattr(self.query, 'limit_stop', None)
    both_ints = isinstance(limit_start, int) and isinstance(limit_stop, int)
    if (both_ints and isinstance(data, list)
            and limit_stop - limit_start < len(data)
            and limit_stop <= len(data)):
        data = data[limit_start:limit_stop]

    # [] is the case of empty no-paginated result
    if data != []:
        serializer = self.model.get_serializer(data=data)
        if not serializer.is_valid():
            raise ROAException(
                u'Invalid deserialization for %s model: %s'
                % (self.model, serializer.errors))
        # The validated data lacks ids; recover each one from the raw payload.
        for index, obj in enumerate(serializer.validated_data):
            obj['id'] = serializer.initial_data[index].get('id', None)
            yield self.model(**obj)
def get_issue(key):
    """
    Given an issue key (i.e. JRA-9) return the JSON representation of it.
    This is the only place where we deal with JIRA's REST API.
    """
    print('Fetching ' + key)
    # we need to expand subtasks and links since that's what we care about here.
    issue_resource = Resource(url + '/rest/api/latest/issue/%s' % key,
                              pool_instance=pool, filters=[auth])
    response = issue_resource.get(headers={'Content-Type': 'application/json'})
    # Not all resources will return 200 on success. There are other success
    # status codes. Like 204. We've read the documentation for though and
    # know what to expect here.
    if response.status_int != 200:
        return None
    return json.loads(response.body_string())
def url_from_api(artist, song):
    """Get URL to lyrics article from lyrics.wikia.com API"""
    resource = Resource('http://lyrics.wikia.com',
                        headers={'Accept': 'application/json'})
    raw = resource.get('/api.php', fmt='json',
                       artist=proper_unicode(artist),
                       song=proper_unicode(song))
    # Compatibility for restkit >= 0.9
    if not isinstance(raw, basestring):
        raw = raw.unicode_body
    # replace needed because wikia doesn't provide us with valid JSON.
    # [6:] needed because it says "song = " first.
    raw = raw[6:].replace("'", '"')
    result = simplejson.loads(raw)
    if result['lyrics'] == 'Not found':
        raise ValueError("No lyrics for {song} by {artist}".format(song=song, artist=artist))
    print >>sys.stderr, 'url =', result['url']
    return unicode(unquote(result['url'].encode(charset)), charset)
def _get_from_id_or_pk(self, id=None, pk=None, **kwargs):
    """
    Returns an object given an id or pk, request directly with the
    get_resource_url_detail method without filtering on ids
    (as Django's ORM do).
    """
    clone = self._clone()
    # Instantiation of clone.model is necessary because we can't set
    # a staticmethod for get_resource_url_detail and avoid to set it
    # for all model without relying on get_resource_url_list
    instance = clone.model()
    if pk is None:
        instance.id = id
    else:
        instance.pk = pk

    resource_kwargs = dict(kwargs)
    resource_kwargs.update(ROA_SSL_ARGS)
    resource = Resource(instance.get_resource_url_detail(),
                        filters=ROA_FILTERS, **resource_kwargs)
    try:
        parameters = clone.query.parameters
        logger.debug(u"""Retrieving : "%s" through %s with parameters "%s" """ % (
            clone.model.__name__,
            resource.uri,
            force_unicode(parameters)))
        response = resource.get(headers=self._get_http_headers(), **parameters)
    except ResourceNotFound:
        # A missing remote object maps to a regular Django 404.
        raise Http404
    except Exception as e:
        raise ROAException(e)

    body = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)
    # Translate remote model names back to local ones before parsing.
    for local_name, remote_name in ROA_MODEL_NAME_MAPPING:
        body = body.replace(remote_name, local_name)

    data = self.model.get_parser().parse(StringIO(body))
    serializer = self.model.get_serializer(data=data)
    if not serializer.is_valid():
        raise ROAException(u'Invalid deserialization for %s model: %s' % (
            self.model, serializer.errors))

    if pk is None:
        return self.model(id=id, **serializer.validated_data)
    pk_name = self.model._meta.pk.name
    serializer.validated_data[pk_name] = pk
    return self.model(**serializer.validated_data)
def is_pullrequest_tested(pr, token, owner, repo):
    """
    Return True if the pull request touches at least one file whose path
    contains "test" (case-insensitive), False otherwise.

    pr, owner, repo: strings identifying the GitHub pull request.
    token: GitHub API token used for authorization.
    """
    url = ('https://api.github.com/repos/' + owner + '/' + repo
           + '/pulls/' + pr + '/files')
    resource = Resource(url, pool=pool)
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'token %s' % token,
    }
    response = resource.get(headers=headers)
    files = json.loads(response.body_string())
    #print 'NORMAL:', json.dumps(files, sort_keys=True,indent=2)
    # A PR counts as "tested" as soon as any changed file looks like a test.
    return any('test' in changed['filename'].lower() for changed in files)
def load_data(request):
    """
    Fetch JIRA issues EPS-115 through EPS-648 and bulk-load each one as a
    case, rendering the load_data template with the loaded case keys.

    On the first non-200 response, rendering stops and an error message
    is shown instead.
    """
    loaded_case = []
    # The credentials never change between iterations: build the auth
    # filter once instead of once per issue.
    auth = BasicAuth(JIRA_USR, JIRA_PWD)
    for num in range(115, 649):
        resource = Resource(BASE_URL + "EPS-" + str(num), filters=[auth])
        response = resource.get(headers={'Content-Type': 'application/json'})
        if response.status_int == 200:
            json_obj = json.loads(response.body_string())
            loaded_case.append(bulkload.load_case(json_obj).case_key)
        else:
            context = {
                "error_message": "Unable to fetch data from JIRA"
            }
            return render(request, "mining/load_data.html", context)
    return render(request, "mining/load_data.html", {"cases": loaded_case})
def check_heartbeat():
    """Report service status of celery workers (via Flower) and the heartbeat."""
    flower_url = getattr(settings, 'CELERY_FLOWER_URL', None)
    if flower_url:
        flower = Resource(flower_url, timeout=3)
        body = flower.get("api/workers", params_dict={'status': True}).body_string()
        workers = json.loads(body)
        # Any worker reporting a falsy status is considered down.
        down = ['* {} celery worker down'.format(host)
                for host, ok in workers.items() if not ok]
        if down:
            return ServiceStatus(False, '\n'.join(down))
    is_alive = heartbeat.is_alive()
    return ServiceStatus(is_alive, "OK" if is_alive else "DOWN")
def _get_from_id_or_pk(self, id=None, pk=None, **kwargs):
    """
    Returns an object given an id or pk, request directly with the
    get_resource_url_detail method without filtering on ids
    (as Django's ORM do).
    """
    clone = self._clone()
    # Instantiation of clone.model is necessary because we can't set
    # a staticmethod for get_resource_url_detail and avoid to set it
    # for all model without relying on get_resource_url_list
    instance = clone.model()
    if pk is None:
        instance.id = id
    else:
        instance.pk = pk

    resource_kwargs = dict(kwargs)
    resource_kwargs.update(ROA_SSL_ARGS)
    resource = Resource(instance.get_resource_url_detail(),
                        filters=ROA_FILTERS, **resource_kwargs)
    try:
        parameters = clone.query.parameters
        logger.debug(
            u"""Retrieving : "%s" through %s with parameters "%s" """
            % (clone.model.__name__, resource.uri,
               force_unicode(parameters)))
        response = resource.get(headers=self._get_http_headers(), **parameters)
    except Exception as e:
        # Every request failure (including 404) surfaces as ROAException here.
        raise ROAException(e)

    body = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)
    # Translate remote model names back to local ones before parsing.
    for local_name, remote_name in ROA_MODEL_NAME_MAPPING:
        body = body.replace(remote_name, local_name)

    # Deserializing objects:
    data = self.model.get_parser().parse(StringIO(body))
    serializer = self.model.get_serializer(data=data)
    if not serializer.is_valid():
        raise ROAException(
            u'Invalid deserialization for {} model: {}'.format(
                self.model, serializer.errors))
    return serializer.object
def check_rabbitmq():
    """Check that the configured RabbitMQ vhost is reachable via the management API."""
    if not settings.BROKER_URL.startswith('amqp'):
        return ServiceStatus(False, "RabbitMQ Not configured")
    # BROKER_URL looks like amqp://host:5672/vhost; the management API
    # listens on port 15672 on the same host.
    parts = settings.BROKER_URL.replace('amqp://', '').split('/')
    management_url = parts[0].replace('5672', '15672')
    vhost = parts[1]
    try:
        mq = Resource('http://%s' % management_url, timeout=2)
        vhosts = json.loads(mq.get('api/vhosts', timeout=2).body_string())
        if any(entry['name'] == vhost for entry in vhosts):
            return ServiceStatus(True, 'RabbitMQ OK')
        return ServiceStatus(False, 'RabbitMQ Offline')
    except Exception as e:
        return ServiceStatus(False, "RabbitMQ Error: %s" % e)
def _kill_stale_workers():
    """Shut down celery workers that Flower says should be stopped but still respond to pings."""
    flower_url = getattr(settings, 'CELERY_FLOWER_URL', None)
    if not flower_url:
        return
    flower = Resource(flower_url, timeout=3)
    body = flower.get("api/workers", params_dict={'status': True}).body_string()
    all_workers = json.loads(body)
    expected_running, expected_stopped = parse_celery_workers(all_workers)

    celery = Celery()
    celery.config_from_object(settings)
    pings = parse_celery_pings(celery.control.ping(timeout=10))

    # Workers expected to be stopped that still answer pings are stale.
    hosts_to_stop = [host for host in expected_stopped if host in pings]
    if hosts_to_stop:
        celery.control.broadcast('shutdown', destination=hosts_to_stop)
def check_heartbeat():
    """Report service status of celery workers (via Flower) and the heartbeat."""
    flower_url = getattr(settings, 'CELERY_FLOWER_URL', None)
    if flower_url:
        flower = Resource(flower_url, timeout=3)
        raw = flower.get(
            "api/workers", params_dict={'status': True}).body_string()
        workers = json.loads(raw)
        # Collect every worker whose reported status is falsy.
        down = [
            '* {} celery worker down'.format(host)
            for host, ok in workers.items()
            if not ok
        ]
        if down:
            return ServiceStatus(False, '\n'.join(down))
    is_alive = heartbeat.is_alive()
    return ServiceStatus(is_alive, "OK" if is_alive else "DOWN")
def check_rabbitmq():
    """Probe the RabbitMQ management API for the configured vhost."""
    # NOTE(review): `ret` is never used in the visible body, and the
    # computed `mq_status` is never returned — presumably they are
    # consumed further down; confirm against the full file.
    ret = {}
    mq_status = "Unknown"
    if settings.BROKER_URL.startswith('amqp'):
        # BROKER_URL looks like amqp://host:5672/vhost; the management
        # API conventionally listens on port 15672 on the same host.
        amqp_parts = settings.BROKER_URL.replace('amqp://', '').split('/')
        mq_management_url = amqp_parts[0].replace('5672', '15672')
        vhost = amqp_parts[1]
        try:
            mq = Resource('http://%s' % mq_management_url, timeout=2)
            vhost_dict = json.loads(
                mq.get('api/vhosts', timeout=2).body_string())
            # Default to Offline; flip to OK if the vhost is listed.
            mq_status = "Offline"
            for d in vhost_dict:
                if d['name'] == vhost:
                    mq_status = 'RabbitMQ OK'
        except Exception, ex:
            # Any failure (connection, JSON, missing key) is reported as an error.
            mq_status = "RabbitMQ Error: %s" % ex
def test_custom_serializer(self):
    """Round-trip a RemotePage through the registered 'custom' XML serializer."""
    register_serializer('custom', 'examples.django_roa_client.serializers')
    previous_format = settings.ROA_FORMAT
    settings.ROA_FORMAT = 'custom'
    page = RemotePage.objects.create(title=u'A custom serialized page')
    self.assertEqual(repr(page), '<RemotePage: A custom serialized page (1)>')
    resource = Resource('http://127.0.0.1:8081/django_roa_server/remotepage/',
                        filters=ROA_FILTERS)
    response = resource.get(format='custom')
    # The raw body must be the exact XML emitted by the custom serializer.
    self.assertEqual(
        repr(response.body_string()),
        '\'<?xml version="1.0" encoding="utf-8"?>\\n<django-test version="1.0">\\n <object pk="1" model="django_roa_server.remotepage">\\n <field type="CharField" name="title">A custom serialized page</field>\\n </object>\\n</django-test>\''
    )
    self.assertEqual(len(RemotePage.objects.all()), 1)
    # Re-fetching through the ORM must deserialize back to the same object.
    page = RemotePage.objects.get(id=page.id)
    self.assertEqual(repr(page), '<RemotePage: A custom serialized page (1)>')
    settings.ROA_FORMAT = previous_format
def get_jira_issue(server_base_url, user, password, key, fields=None): verbose = False # A pool of connections #pool = SimplePool(keepalive = 2) # This sends the user and password with the request. auth = BasicAuth(user, password) resource = Resource(server_base_url + 'rest/api/2/issue/%s?fields=%s' % (key, fields), filters=[auth]) try: response = resource.get(headers={'Content-Type': 'application/json'}) except Exception, err: print "EXCEPTION: %s " % str(err) return
def system_info(request):
    """Build the template context for the system-info admin page.

    Collects polling intervals from the querystring, the host name, and a
    tail of the CouchDB server log.
    """
    def human_bytes(bytes):
        # Format a byte count with a binary-unit suffix (B/KB/MB/GB).
        #source: https://github.com/bartTC/django-memcache-status
        bytes = float(bytes)
        if bytes >= 1073741824:
            gigabytes = bytes / 1073741824
            size = '%.2fGB' % gigabytes
        elif bytes >= 1048576:
            megabytes = bytes / 1048576
            size = '%.2fMB' % megabytes
        elif bytes >= 1024:
            kilobytes = bytes / 1024
            size = '%.2fKB' % kilobytes
        else:
            size = '%.2fB' % bytes
        return size

    context = get_hqadmin_base_context(request)
    # Client-side polling intervals (milliseconds), overridable via querystring.
    context['couch_update'] = request.GET.get('couch_update', 5000)
    context['celery_update'] = request.GET.get('celery_update', 10000)
    context['hide_filters'] = True
    # os.uname is unavailable on some platforms (e.g. Windows).
    if hasattr(os, 'uname'):
        context['current_system'] = os.uname()[1]

    #from dimagi.utils import gitinfo
    #context['current_ref'] = gitinfo.get_project_info()
    #removing until the async library is updated
    context['current_ref'] = {}

    # Build the CouchDB root URL with or without basic-auth credentials.
    if settings.COUCH_USERNAME == '' and settings.COUCH_PASSWORD == '':
        couchlog_resource = Resource("http://%s/" % (settings.COUCH_SERVER_ROOT))
    else:
        couchlog_resource = Resource("http://%s:%s@%s/" % (settings.COUCH_USERNAME,
                                                           settings.COUCH_PASSWORD,
                                                           settings.COUCH_SERVER_ROOT))
    try:
        #todo, fix on bigcouch/cloudant
        # _log is not exposed on BigCouch, so show a placeholder there.
        context['couch_log'] = "Will be back online shortly" if is_bigcouch() \
            else couchlog_resource.get('_log', params_dict={'bytes': 2000 }).body_string()
    except Exception, ex:
        context['couch_log'] = "unable to open couch log: %s" % ex
def _get_page(self, path):
    """if we're specifying a cluster then verify that a cluster is set"""
    response = Resource(self.host).get(path=path)
    raw = response.body_string()
    try:
        body = json.loads(raw)
    except ValueError:
        # The endpoint sometimes appends trailing junk; retry without
        # the last three bytes.
        body = json.loads(raw[:-3])

    # test what was returned, see if any exceptions need to be raise
    if not body:
        raise HelixException("body for path {0} is empty".format(path))
    if isinstance(body, dict) and "ERROR" in body:
        raise HelixException(body["ERROR"])
    return body
def jiraConnection(query, auth):
    """Fetch a JIRA resource; return the parsed JSON issue on HTTP 200, else None."""
    try:
        resource = Resource(query, filters=[auth])
        response = resource.get(headers={'Content-Type': 'application/json'})
    except errors.Unauthorized:
        print("Error::Incorrect Username/Pwd combination, please resubmit. If you think you type in the right credential, please try log in from broswer first. It might caused by Jira captcha validation")
        return None
    if response.status_int != 200:
        return None
    # issue is the JSON representation of the issue
    return json.loads(response.body_string())
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    remote web service.

    Yields deserialized objects from the remote response.
    """
    resource = Resource(
        self.model.get_resource_url_list(**self.query.filters),
        filters=ROA_FILTERS, **ROA_SSL_ARGS)
    try:
        params = self.query.parameters
        logger.debug(
            u"""Requesting: "%s" through %s with parameters "%s" """
            % (self.model.__name__, resource.uri, force_unicode(params)))
        response = resource.get(headers=self._get_http_headers(), **params)
    except ResourceNotFound:
        return
    except Exception as e:
        raise ROAException(e)

    body = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)

    # Deserializing objects:
    data = self.model.get_parser().parse(StringIO(body))

    # [] is the case of empty no-paginated result
    if data != []:
        serializer = self.model.get_serializer(data=data)
        if not serializer.is_valid():
            raise ROAException(
                u'Invalid deserialization for {} model: {}'.format(
                    self.model, serializer.errors))
        for obj in serializer.object:
            yield obj
def hb_check():
    """Check celery worker liveness via Flower, falling back to the heartbeat.

    Returns a (status, message) tuple: (False, message) when Flower reports
    downed workers, otherwise (heartbeat_alive, None). Any check failure is
    treated as a dead heartbeat (best-effort monitoring).
    """
    celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
    if celery_monitoring:
        try:
            cresource = Resource(celery_monitoring, timeout=3)
            t = cresource.get("api/workers").body_string()
            all_workers = json.loads(t)
            bad_workers = []
            for hostname, w in all_workers.items():
                if not w['status']:
                    bad_workers.append(
                        '* {} celery worker down'.format(hostname))
            if bad_workers:
                return (False, '\n'.join(bad_workers))
            else:
                hb = heartbeat.is_alive()
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any other failure keeps the best-effort fallback.
        except Exception:
            hb = False
    else:
        try:
            hb = heartbeat.is_alive()
        except Exception:
            hb = False
    return (hb, None)
class TestEndpoint(unittest.TestCase): def setUp(self): auth = BasicAuth('john', 'teste') res = Resource(server, filters=[ auth, ]) r = res.get('/authenticate') data = loads(r.body_string()) self.auth = AuthToken(data.get('token')) self.res = Resource(server, filters=[ self.auth, ]) def testAuthentication(self): r = self.res.get('/') self.assertEqual(200, r.status_int) try: res = Resource(server) r = res.get('/') self.assertTrue(False) except Unauthorized, e: self.assertTrue(True)