Example No. 1
    def getBusesPositions(self):
        lcord = []
        conn = Connection("http://mc933.lab.ic.unicamp.br:8017/onibus")
        response = conn.request_get("")

        buses = json.loads(response["body"])

        for i in buses:
            response = conn.request_get(str(i))
            lcord.append(json.loads(response["body"]))
        #conn.request_put("/sidewinder", {'color': 'blue'}, headers={'content-type':'application/json', 'accept':'application/json'})
        return lcord
Example No. 2
 def getBusesPositions(self):
     lcord = []
     conn = Connection("http://mc933.lab.ic.unicamp.br:8017/onibus")
     response = conn.request_get("")
     
     buses = json.loads(response["body"])
     
     for i in buses:
         response = conn.request_get(str(i))
         lcord.append(json.loads(response["body"]))
     #conn.request_put("/sidewinder", {'color': 'blue'}, headers={'content-type':'application/json', 'accept':'application/json'})
     return lcord
Example No. 3
    def submit_rdfxml_from_url(self,
                               url_to_file,
                               headers={"Accept": "application/rdf+xml"}):
        """Convenience method - downloads the file from a given url, and then pushes that
           into the meta store. Currently, it doesn't put it through a parse-> reserialise
           step, so that it could handle more than rdf/xml on the way it but it is a
           future possibility."""
        import_rdf_connection = Connection(url_to_file)
        response = import_rdf_connection.request_get("", headers=headers)

        if response.get('headers') and response.get('headers').get(
                'status') in ['200', '204']:
            request_headers = {}

            # Lowercase all response header fields, to make matching easier.
            # According to HTTP spec, they should be case-insensitive
            response_headers = response['headers']
            for header in response_headers:
                response_headers[header.lower()] = response_headers[header]

            # Set the body content
            body = response.get('body').encode('UTF-8')

            # Get the response mimetype
            rdf_type = response_headers.get('content-type', None)

            return self._put_rdf(body, mimetype=rdf_type)
Example No. 4
def _get_flavor(attributes, vm_id):
    conn = Connection(attributes["cm_nova_url"], username="", password="")
    tenant_id, x_auth_token = _get_keystone_tokens(attributes)
    resp = conn.request_get("/" + tenant_id +"/servers/" + vm_id, args={}, headers={'content-type':'application/json', 'accept':'application/json', 'x-auth-token':x_auth_token})
    status = resp[u'headers']['status']
    if status == '200' or status == '304':
        server = json.loads(resp['body'])
        flavor_id = server['server']['flavor']['id']
    else:
        log.error("Bad HTTP return code: %s" % status)
    resp = conn.request_get("/" + tenant_id +"/flavors/" + flavor_id, args={}, headers={'content-type':'application/json', 'accept':'application/json', 'x-auth-token':x_auth_token})
    status = resp[u'headers']['status']
    if status == '200' or status == '304':
        flavor = json.loads(resp['body'])
    else:
        log.error("_get_flavor: Bad HTTP return code: %s" % status)
    return flavor['flavor']
Example No. 5
def get_style_from_geoserver(request):
    if request.method == 'GET':
        layerName = request.GET.get('layer_name')
        baseUrl = openthingis.settings.GEOSERVER_REST_SERVICE
        conn = Connection(
            baseUrl, 
            username=openthingis.settings.GEOSERVER_USER, 
            password=openthingis.settings.GEOSERVER_PASS
        )
        
        layerInfo = conn.request_get("/layers/" + layerName + '.json')
        body = layerInfo['body']
        layer = simplejson.loads(body)
        defaultStyle = layer['layer']['defaultStyle']['name']

        sld = conn.request_get("/styles/" + defaultStyle + '.sld')
        sld_body = sld['body']
        return HttpResponse(sld_body, content_type="application/xml")
Example No. 6
def _get_images(attributes):
    conn = Connection(attributes["cm_nova_url"], username="", password="")
    tenant_id, x_auth_token = _get_keystone_tokens(attributes)
    resp = conn.request_get("/" + tenant_id + "/images", args={}, headers={'content-type':'application/json', 'accept':'application/json', 'x-auth-token':x_auth_token})
    status = resp[u'headers']['status']
    if status == '200' or status == '304':
        images = json.loads(resp['body'])
        return images['images']
    else:
        log.error("_get_images: Bad HTTP return code: %s" % status)
Example No. 7
class DiffsClient(object):

    _logger = logging.getLogger('DiffsClient')
    _logger.addHandler(logging.StreamHandler(sys.stderr))

    def __init__(self, agent_url, verbose=False):
        self._logger.setLevel(logging.DEBUG if verbose else logging.NOTSET)
        if not agent_url.endswith('/'):
            agent_url += '/'
        self.agent_url = agent_url
        base_url = urljoin(agent_url, 'rest')
        self._conn = Connection(base_url)
        self._conn = Connection(self.get_session_url())

    def get_session_url(self):
        url = '/diffs/sessions'
        response = self._post(url)
        return response['headers']['location']

    def get_diffs(self, pair_key, range_start, range_end):
        url = '/?pairKey={0}&range-start={1}&range-end={2}'.format(
                pair_key,
                range_start.strftime(DATETIME_FORMAT),
                range_end.strftime(DATETIME_FORMAT))
        response = self._get(url)
        return json.loads(response['body'])

    def get_diffs_zoomed(self, range_start, range_end, bucketing):
        "A dictionary of pair keys mapped to lists of bucketed diffs"
        url = '/zoom?range-start={0}&range-end={1}&bucketing={2}'.format(
                range_start.strftime(DATETIME_FORMAT),
                range_end.strftime(DATETIME_FORMAT),
                bucketing)
        response = self._get(url)
        return json.loads(response['body'])

    def _get(self, url):
        self._logger.debug("GET %s", self._rebuild_url(url))
        response = self._conn.request_get(url)
        self._logger.debug(response)
        return response
    
    def _post(self, url):
        self._logger.debug("POST %s", self._rebuild_url(url))
        response = self._conn.request_post(url)
        self._logger.debug(response)
        return response

    def _rebuild_url(self, url):
        return self._conn.url.geturl() + url

    def __repr__(self):
        return "DiffsClient(%s)" % repr(self.agent_url)
Example No. 8
 def restful_caller(self, url, method, parameters, http_method):
     try:
         conn = Connection(url)
     except:
         return 'Cannot connect with ' + url
     ret = None
     if http_method.upper() == 'GET':
         try:
             ret = conn.request_get(resource=method, args=parameters,
                                    headers={'Content-type': 'text/xml', 'Accept': 'text/xml'})
         except:
             ret = 'Problem with method ' + method
     return ret
Example No. 9
def _get_VMs(attributes):
    conn = Connection(attributes["cm_nova_url"], username="", password="")
    tenant_id, x_auth_token = _get_keystone_tokens(attributes)
    resp = conn.request_get("/" + tenant_id +"/servers", args={}, headers={'content-type':'application/json', 'accept':'application/json', 'x-auth-token':x_auth_token})
    status = resp[u'headers']['status']
    if status == '200' or status == '304':
        servers = json.loads(resp['body'])
        i = 0
        vms = []
        for r in servers['servers']:
            vms.append(r['name'])
            i = i+1
        return vms
    else:
        log.error("_get_VMs: Bad HTTP return code: %s" % status)
Example No. 10
class Tinyurl(object):
    def __init__(self):
        self._conn = Connection(TINYURL_ENDPOINT)
        # TODO test availability
        self.active = True
    
    def get(self, url):
        # Handcraft the ?url=XXX line as Tinyurl doesn't understand urlencoded
        # params - at least, when I try it anyway...
        response = self._conn.request_get("?%s=%s" % (TINYURL_PARAM, url))
        http_status = response['headers'].get('status')
        if http_status == "200":
            return response.get('body').encode('UTF-8')
        else:
            raise ConnectionError
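
A short usage sketch, assuming TINYURL_ENDPOINT and TINYURL_PARAM are the module-level constants the class refers to (the service endpoint and its URL parameter name); the long URL is just an illustration:

shortener = Tinyurl()
if shortener.active:
    # Raises ConnectionError when the service does not answer with HTTP 200.
    short_url = shortener.get("http://example.com/some/very/long/path")
    print(short_url)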
Example No. 11
def test_rest(myLat, myLng):
	# http://api.spotcrime.com/crimes.json?lat=40.740234&lon=-73.99103400000001&radius=0.01&callback=jsonp1339858218680&key=MLC
	spotcrime_base_url = "http://api.spotcrime.com"
	
	conn = Connection(spotcrime_base_url)

	resp = conn.request_get("/crimes.json", args={	'lat'	: myLat,
													'lon'	: myLng,		
													'radius': '0.01',
													'key' 	: 'MLC'},
											headers={'Accept': 'text/json'})
	
	
	resp_body = resp["body"]
	return resp_body
Example No. 12
class Tinyurl(object):
    def __init__(self):
        self._conn = Connection(TINYURL_ENDPOINT)
        # TODO test availability
        self.active = True

    def get(self, url):
        # Handcraft the ?url=XXX line as Tinyurl doesn't understand urlencoded
        # params - at least, when I try it anyway...
        response = self._conn.request_get("?%s=%s" % (TINYURL_PARAM, url))
        http_status = response['headers'].get('status')
        if http_status == "200":
            return response.get('body').encode('UTF-8')
        else:
            raise ConnectionError
Example No. 13
def _get_VM(attributes):
    conn = Connection(attributes["cm_nova_url"], username="", password="")
    tenant_id, x_auth_token = _get_keystone_tokens(attributes)
    resp = conn.request_get("/" + tenant_id +"/servers/detail", args={}, headers={'content-type':'application/json', 'accept':'application/json', 'x-auth-token':x_auth_token})
    status = resp[u'headers']['status']
    found = 0
    if status == '200' or status == '304':
        servers = json.loads(resp['body'])
        for vm in servers['servers']:
            if attributes['name'] == vm['name']:
                found = 1
                return vm
        if found == 0:
            #return False
            raise ResourceException("vm %s not found" % attributes['name'])
    else:
        log.error("_get_VM: Bad HTTP return code: %s" % status)
Example No. 14
def test_rest(myLat, myLng):
    # http://api.spotcrime.com/crimes.json?lat=40.740234&lon=-73.99103400000001&radius=0.01&callback=jsonp1339858218680&key=MLC
    spotcrime_base_url = "http://api.spotcrime.com"

    conn = Connection(spotcrime_base_url)

    resp = conn.request_get("/crimes.json",
                            args={
                                'lat': myLat,
                                'lon': myLng,
                                'radius': '0.01',
                                'key': 'MLC'
                            },
                            headers={'Accept': 'text/json'})

    resp_body = resp["body"]
    return resp_body
Example No. 15
class Server:

    def __init__(self, root_url="http://led-o-matic.appspot.com"):
        self.root_url = root_url
        self.conn = Connection(self.root_url)
        self.name = ""

    def getPinStatus(self, pins_name, pin_id):
        request = self.name + '/' + pins_name + '/' + pin_id
        response = self.conn.request_get(request)
        return response['body']

    def login(self, name):
        self.name = name
        response = self.conn.request_post('/' + name)
        return self.root_url + '/' + self.name + response['body']
Example No. 16
class MeaningRecognitionAPI(object):
	
	def __init__(self, url, app_id = None, app_key = None):
		self.url = url
		self.app_id = app_id
		self.app_key = app_key
		self.connection = Connection(self.url)
    
	def recognize(self, text_to_recognize):
		args = { 'body': text_to_recognize.encode('utf-8') }
		if (self.app_id):
			args['app_id'] = self.app_id
		if (self.app_key):
			args['app_key'] = self.app_key
		result = self.connection.request_get('/v1/disambiguate', args= args, headers={'Accept':'text/json'})
		if (result['headers']['status'] != '200'):
			raise IOError('Failed to make disambiguation request', result)
		return DisambiguationResult(result["body"])
Example No. 17
def get_tariff(tariff_id, phone):
    # Should also work with https protocols
    rest_user = "******"
    rest_pass = "******"
    rest_url = "http://test.lincom3000.com.ua/api"

    conn = Connection(rest_url, username=rest_user, password=rest_pass)

    #nibble_rate
    t = "/tariff/%i/%s/" % (int(tariff_id), phone)
    response = conn.request_get(t, headers={'Accept':'text/json'})
    headers = response.get('headers')
    status = headers.get('status', headers.get('Status'))

    if status in ["200", 200]:
        body = simplejson.loads(response.get('body').encode('UTF-8'))
        return body.get('rate')
    else:
        return None
Example No. 18
class JajahTTSClient(object):
    '''
    classdocs
    '''
    def __init__(self):
        '''
        Constructor
        '''
        self.ip="184.73.181.226"
        self.port="8090"
        self.protocol="http"
        self.path="/text.php"
        
    def set_connection(self):
        self.base_url= self.protocol + "://" + self.ip +":"+ str(self.port) + self.path
        self.conn = Connection(self.base_url)
    
    def send_text_message_to_jajah_tts(self,message):
        response = self.conn.request_get("?text="+message)
        return response['headers']['status'], response['body']
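
A brief usage sketch of the client above; the message text is an arbitrary placeholder:

client = JajahTTSClient()
client.set_connection()  # builds base_url from ip/port/path and opens the Connection
status, body = client.send_text_message_to_jajah_tts("hello world")
print(status)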
Example No. 19
def getIncidents(numberOfIncidents, sortedby):
	base_url = PAGER_DUTY
	conn = Connection(base_url)

	yesterdayDateTime, todayDateTime = getLast24Hours()
	fields = 'status,created_on,assigned_to_user'

	# Specify authorization token
	# Specify content type - json
	resp = conn.request_get("/api/v1/incidents", args={'limit': numberOfIncidents, 'since': yesterdayDateTime, 'until': todayDateTime, 'sort_by' : sortedby, 'fields' : fields}, headers={'Authorization': 'Token token=' + AUTHORIZATION_CODE, 'content-type':'application/json', 'accept':'application/json'})
	status = resp[u'headers']['status']
	body = json.loads(resp[u'body'])

	# check that we got a successful response (200) 
	if status == '200':
		print json.dumps(body, sort_keys=False, indent=4, separators=(',', ': '))
	   
	else:
	    print 'Error status code: ', status
	    print "Response", json.dumps(body, sort_keys=False, indent=4, separators=(',', ': '))
Example No. 20
def main():
    logging.basicConfig(level=logging.DEBUG)
    try:
        os.remove('out.sqlite3')
    except OSError as e:
        if e.errno != 2:
            raise

    db = create_database('out.sqlite3')

    logging.info('requesting new topic tree...')
    base_url = 'http://www.khanacademy.org/api/v1/'
    conn = Connection(base_url)
    response = conn.request_get('/topictree')
    logging.info('parsing json response...')
    tree = json.loads(response.get('body'))
    logging.info('writing to file...')
    with open('../topictree', 'w') as f:
        f.write(json.dumps(tree))

    logging.info('loading topic tree file...')
    with open('../topictree', 'r') as f:
        tree = json.loads(f.read())

    # stick videos in one list and topics in another for future batch insert
    topics = []
    videos = []
    topicvideos = []
    logging.info('parsing tree...')
    parse_topic(tree, '', topics, videos, topicvideos)

    logging.info('inserting topics...')
    insert_topics(db, topics)
    logging.info('inserting videos...')
    insert_videos(db, videos)
    logging.info('inserting topicvideos...')
    insert_topicvideos(db, topicvideos)

    db.commit()
    logging.info('done!')
Example No. 21
def main():
    logging.basicConfig(level=logging.DEBUG)
    try:
        os.remove('out.sqlite3')
    except OSError as e:
        if e.errno != 2:
            raise

    db = create_database('out.sqlite3')

    logging.info('requesting new topic tree...')
    base_url = 'http://www.khanacademy.org/api/v1/'
    conn = Connection(base_url)
    response = conn.request_get('/topictree')
    logging.info('parsing json response...')
    tree = json.loads(response.get('body'))
    logging.info('writing to file...')
    with open('../topictree', 'w') as f:
        f.write(json.dumps(tree))

    logging.info('loading topic tree file...')
    with open('../topictree', 'r') as f:
        tree = json.loads(f.read())

    # stick videos in one list and topics in another for future batch insert
    topics = []
    videos = []
    topicvideos = []
    logging.info('parsing tree...')
    parse_topic(tree, '', topics, videos, topicvideos)

    logging.info('inserting topics...')
    insert_topics(db, topics)
    logging.info('inserting videos...')
    insert_videos(db, videos)
    logging.info('inserting topicvideos...')
    insert_topicvideos(db, topicvideos)

    db.commit()
    logging.info('done!')
Example No. 22
 def submit_rdfxml_from_url(self, url_to_file, headers={"Accept":"application/rdf+xml"}):
     """Convenience method - downloads the file from a given url, and then pushes that
        into the meta store. Currently, it doesn't put it through a parse-> reserialise
        step, so that it could handle more than rdf/xml on the way it but it is a
        future possibility."""
     import_rdf_connection = Connection(url_to_file)
     response = import_rdf_connection.request_get("", headers=headers)
     
     if response.get('headers') and response.get('headers').get('status') in ['200', '204']:
         request_headers = {}
         
         # Lowercase all response header fields, to make matching easier. 
         # According to HTTP spec, they should be case-insensitive
         response_headers = response['headers']
         for header in response_headers:
             response_headers[header.lower()] = response_headers[header]
             
         # Set the body content
         body = response.get('body').encode('UTF-8')
         
         # Get the response mimetype
         rdf_type = response_headers.get('content-type', None)
         
         return self._put_rdf(body, mimetype=rdf_type)
Example No. 23
class RestClientEngine(object):

    __metaclass__ = Singleton

    def __init__(self, conn=None):
        if not conn:
            self.conn = Connection(GENERAL_PARAMETERS['base_url'],
                                   username=GENERAL_PARAMETERS['username'],
                                   password=GENERAL_PARAMETERS['password'])
        else:
            self.conn = conn

    def executeRequest(self,
                       identifier,
                       body=None,
                       query_parameter=None,
                       **kwargs):
        """
        Execute a Http request using pre configured configurations.
        :param identifier Identifier of the configuration block of the request.
        :param body Body content of the request. Default None.
        :param query_parameter Query parameters of URL. apis/apiServ?param1=value1&...  Default None.
        :param kwargs You can include in it pathVariables and extra headers.
        :return Dictionary with the body response and headers that contains status code too.
        :raises NotFoundException if a parameter is not present in config or in method call.
        """
        rel_url = self.buildUrl(identifier, kwargs)
        headers = self.buildHeaders(identifier, kwargs)

        if identifier in REQUESTS:
            if REQUESTS[identifier]['method'] in ('get', 'post', 'put',
                                                  'delete'):
                if REQUESTS[identifier]['method'] == 'get':
                    return self.conn.request_get(rel_url,
                                                 headers=headers,
                                                 args=query_parameter)
                elif REQUESTS[identifier]['method'] == 'post':
                    return self.conn.request_post(rel_url,
                                                  headers=headers,
                                                  body=body)
                elif REQUESTS[identifier]['method'] == 'put':
                    return self.conn.request_put(rel_url,
                                                 headers=headers,
                                                 body=body)
                elif REQUESTS[identifier]['method'] == 'delete':
                    return self.conn.request_delete(rel_url, headers=headers)
                else:
                    raise NotFoundException('method not found')
            else:
                raise NotFoundException('method not found')

    def buildHeaders(self, identifier, kwargs):
        result = {}
        if 'headers' in GENERAL_PARAMETERS:
            for key, value in GENERAL_PARAMETERS['headers'].items():
                result[key] = value

        if identifier in REQUESTS:
            if 'headers' in REQUESTS[identifier]:
                for key, value in REQUESTS[identifier]['headers'].items():
                    result[key] = value
        else:
            raise NotFoundException('Request identifier not found exception.')

        if 'headers' in kwargs:
            for key, value in kwargs['headers'].items():
                result[key] = value

        if GENERAL_PARAMETERS['username'] and GENERAL_PARAMETERS['password']:
            result['Authorization'] = ''.join([
                'Basic ',
                base64.b64encode(':'.join([
                    GENERAL_PARAMETERS['username'],
                    GENERAL_PARAMETERS['password']
                ]))
            ])

        return result

    def buildUrl(self, identifier, kwargs):
        if identifier in REQUESTS:
            relative_url = REQUESTS[identifier]['relative_url']
        else:
            raise NotFoundException('Request identifier not found exception.')

        parameters = self.getParameterFromConfigFile(relative_url)

        replaced_relative_url = self.replaceParameters(relative_url,
                                                       parameters, kwargs)

        return replaced_relative_url

    def getParameterFromConfigFile(self, relative_url):
        return re.findall('{(?P<parameter>[a-zA-Z%0-9_-]+)}', relative_url)

    def replaceParameters(self, relative_url, parameters, kwargs):
        result = relative_url
        for parameter in parameters:
            if parameter in kwargs:
                result = result.replace(''.join(['{', parameter, '}']),
                                        kwargs[parameter], 1)
            else:
                raise NotFoundException(''.join(
                    ['Parameter ', parameter, ' not found for build the url']))
        return result
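
The engine above is driven by module-level GENERAL_PARAMETERS and REQUESTS dictionaries that are not shown in this example. A hedged sketch of what such configuration might look like and how executeRequest could then be called (identifier, URL and credentials are invented for illustration):

# Hypothetical configuration blocks; in the real module these live wherever
# RestClientEngine imports them from.
GENERAL_PARAMETERS = {
    'base_url': 'http://api.example.com',
    'username': 'user',
    'password': 'secret',
    'headers': {'Accept': 'application/json'},
}
REQUESTS = {
    'get_user': {'method': 'get', 'relative_url': '/users/{user_id}'},
}

engine = RestClientEngine()
# 'user_id' fills the {user_id} path variable declared in relative_url.
response = engine.executeRequest('get_user',
                                 query_parameter={'verbose': 'true'},
                                 user_id='42')
print(response['headers']['status'])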
Example No. 24
##REST API of Kegg from http://exploringlifedata.blogspot.com/
#Python 3.6.5 |Anaconda, Inc.

import collections
from restful_lib import Connection

kegg_url = "http://rest.kegg.jp"
conn = Connection(kegg_url)

data = conn.request_get('list/ko', headers={'Accept': 'text/json'})
print(data['headers'])
print(type(data['body']))
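
The body returned by the KEGG list endpoint is tab-separated text, one entry per line; a small sketch of turning it into a dictionary (the exact id format, e.g. a possible "ko:" prefix, depends on the KEGG service):

ko_entries = {}
for line in data['body'].split('\n'):
    if not line:
        continue
    ko_id, description = line.split('\t', 1)
    ko_entries[ko_id] = description
print(len(ko_entries))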
Example No. 25
#!/usr/bin/python

import csv
import simplejson as json
from datetime import datetime
from restful_lib import Connection
#conn = Connection("http://mc933.lab.ic.unicamp.br:8010")
conn = Connection("http://mc933.lab.ic.unicamp.br:8010/getBusesPositions")
#response = conn.request_get("/getPosition")
response = conn.request_get("")

buses = json.loads(response["body"])

for i in buses:
    response = conn.request_get(str(i))
    obj = json.loads(response["body"])
    # Assumption: the returned position object carries 'lat'/'lon' fields
    # (the field names are a guess).
    lat, lon = obj.get('lat'), obj.get('lon')
    writer = csv.writer(open('points.csv', 'ab')) #, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL
    #writer.writerow(['Horario', 'Latitude', 'Longitude'])
    writer.writerow([datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S"), str(lat), str(lon)])
    print "Point (" + str(lat) + ',' + str(lon) + ") saved at " + datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S")
    #print response["body"] + "\n"
#coordenada = json.loads(response["body"])

conn.request_put("/sidewinder", {'color': 'blue'}, headers={'content-type':'application/json', 'accept':'application/json'})
Example No. 26
class SensorBase:

    def __init__(self, host, email, password):
        """
        Set up a connection with the sensorbase host
        """
        
        self.host = host
        self.email = email
        self.password = password

        self.connection = Connection(self.host, self.email, self.password)

    def get_sensordata(self,user=None):
        """
        Deprecated! Written during development to get acquainted with
        Hackystat's REST API. Might become handy if sensorbase.py becomes
        a python lib to interact with hackystat. Refactoring needed?
        """
        
        if user == None:
            user = self.email

        response = self.connection.request_get("/sensordata"+user)
        xml_string = response[u'body']
        
        tree = ET.XML(xml_string)
        ET.dump(tree)

    def put_sensordata(self, datatype="", tool="", resource="", properties=""):
        """
        Used to put up new sensordata on the hackystat server. Creates an
        XML element tree according to the xml schema (hardcoded). Will probably
        need some refactoring later on.
        """

        time = self.get_timestamp()

        # build a tree structure
        e_sensordata = ET.Element("SensorData")

        e_timestamp = ET.SubElement(e_sensordata, "Timestamp")
        e_timestamp.text = time
        
        e_runtime = ET.SubElement(e_sensordata, "Runtime")
        e_runtime.text = time

        e_tool = ET.SubElement(e_sensordata, "Tool")
        e_tool.text = tool

        e_datatype = ET.SubElement(e_sensordata, "SensorDataType")
        e_datatype.text = datatype

        e_resource = ET.SubElement(e_sensordata, "Resource")
        e_resource.text = resource

        e_owner = ET.SubElement(e_sensordata, "Owner")
        e_owner.text = self.email

        e_properties = ET.SubElement(e_sensordata, "Properties")

        for property_key in properties.keys():
            e_property = ET.SubElement(e_properties, "Property")
            e_key = ET.SubElement(e_property, "Key")
            e_key.text = property_key
            e_value = ET.SubElement(e_property, "Value")
            e_value.text = properties[property_key]

        uri = "/sensordata/[email protected]/"+time
        response = self.connection.request_put(uri, None,
                                               ET.tostring(e_sensordata))
        print response
        

    def get_timestamp(self,hour=0,minute=0):
        time_current = datetime.now()
        time_current = time_current + timedelta(hours=hour,minutes=minute)

        # Time format for the generated timestamp
        time_format = "%Y-%m-%dT%H:%M:%S.000"
        timestamp = time.strftime(time_format, time_current.timetuple())

        return timestamp

    def get_sensor_datatype(self,data):
        """
        Deprecated! Written during development to get acquainted with
        Hackystat's REST API. Might become handy if sensorbase.py becomes
        a python lib to interact with hackystat. Refactoring needed?
        """
        
        name = data.attrib['Name']
        response = self.connection.request_get("/sensordatatypes/"+name)

        xml_string = response[u'body']

        tree = ET.XML(xml_string)
        return response[u'body']

    def get_projectdata(self,user=None,project=None,args=None):
        """
        Get project data from hackystat server. If no user is defined it
        uses the email login used to initiate the connection, if no project
        is defined it uses 'Default'. Arguments in a dictionary are just
        forwarded. Returns the body of the response. Would it be better for
        this function to be a generator of responses? Might be tricky to
           implement given the structure of hackystat responses.
        """

        if user == None:
            user = self.email

        if project == None:
            project = "Default"

        response = self.connection.request_get("/projects/"+\
                                               user+"/"+project+\
                                               "/sensordata",args)

        return response[u'body']
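
A minimal usage sketch for SensorBase; the host URL and credentials are placeholders, and get_projectdata is called with its defaults (own email, the "Default" project):

sb = SensorBase("http://hackystat.example.org:9876/sensorbase",  # placeholder host
                "user@example.com", "secret")
xml_body = sb.get_projectdata()
print(xml_body)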
Example No. 27
            "Invalid property value (missing or null or empty string) for 'P1'"
        )

    def tearDown(self):
        conn_admin.request_delete(self.p + '/P1', headers=copy(jsonheader))
        conn_admin.request_delete(self.p + '/P2', headers=copy(jsonheader))
        conn_admin.request_delete(self.p + '/P3', headers=copy(jsonheader))
        conn_admin.request_delete(self.p + '/P4', headers=copy(jsonheader))
        conn_admin.request_delete(self.t + '/T1', headers=copy(jsonheader))
        conn_admin.request_delete(self.t + '/T2', headers=copy(jsonheader))
        conn_admin.request_delete(self.t + '/T33', headers=copy(jsonheader))
        conn_admin.request_delete(self.t + '/T44', headers=copy(jsonheader))

if __name__ == '__main__':

    # Check if database is empty
    getextra = ""
    response = conn_none.request_get('resources/channels',
                                     headers=copy(jsonheader))
    assert '200' == response[u'headers'][
        'status'], 'Database list request returned an error'
    j1 = JSONDecoder().decode(response[u'body'])
    if (None != j1[u'channels']):
        print "Database at " + base_url + " not empty."
        d = raw_input('Continue anyway? [y/N] ')
        if d != "y" and d != "Y":
            sys.exit(1)
        dbnonempty = True
        getextra = "?~name=C?"
    unittest.main()
Example No. 28
import collections
from restful_lib import Connection
from tqdm import tqdm

kegg_url = "http://rest.kegg.jp"
conn = Connection(kegg_url)

all_paths = conn.request_get('list/path', headers={'Accept':'text/json'})

path_list = [str(p.split("\t")[0]).split(":")[1] for p in  all_paths['body'].split('\n') if p!=""]

all_mds = conn.request_get('list/md', headers={'Accept':'text/json'})
mds_list = [str(p.split("\t")[0]).split(":")[1] for p in  all_mds['body'].split('\n') if p!=""]

pathways2ko = { p : [str(l.split(" ")[-1]) for l in conn.request_get('get/' + p, headers={'Accept':'text/json'})['body'].split("\n") if "KO_PATHWAY" in l] for p in tqdm(path_list)}
print len([p[0] for p in pathways2ko.values()  if len(p) > 1 ]), "more than one ko"
all_ko_ids = [p[0] for p in pathways2ko.values()  if len(p) > 0 ]

all_kos = { p : [str(l.split(" ")[-1]) for l in conn.request_get('get/' + p, headers={'Accept':'text/json'})['body'].split("\n") if "KO_PATHWAY" in l] for p in tqdm(path_list)}

Example No. 29
    def data (self):
        return self.data


if __name__ == '__main__':

    args = {}
    args['accountname'] = sys.argv[1]
    args['user'] = sys.argv[2]
    args['pass'] = sys.argv[3]

    # Create connection to freeagent API
    fapp = Connection("https://" + args['accountname'] + ".freeagentcentral.com", username=args['user'], password=args['pass'])

    # Get XML from REST
    invoicexml = fapp.request_get("/invoices")['body']
    contactxml = fapp.request_get("/contacts")['body']

    # Parse contacts XML
    extract = ["id", "organisation-name", "first-name", "last-name"]
    dh = GetXMLelements(extract)
    
    parseString(contactxml.encode('utf-8'), dh)
    
    # Create contact dictonary
    contacts = {}
    for contact in dh.data:
        contacts[contact[0]] = contact[1:]
    
    # Parse invoices XML
    extract = ["contact-id", "dated-on", "reference", "net-value"]
Example No. 30
    def test_AuthorizedPlain(self):
        doPostAndFailPlain(self, self.urlp, Ps12_empty)

    def test_AuthorizedAsProp(self):
        doPostAndGetJSON(self, conn_prop, self.urlp, Ps12_empty, 204, self.urlp, Ps12_empty_r, 200)
    def test_AuthorizedAsChan(self):
        doPostAndGetJSON(self, conn_chan, self.urlp, Ps12_empty, 204, self.urlp, Ps12_empty_r, 200)
    def test_AuthorizedAsAdmin(self):
        doPostAndGetJSON(self, conn_admin, self.urlp, Ps12_empty, 204, self.urlp, Ps12_empty_r, 200)

    def tearDown(self):
        conn_admin.request_delete(self.P1, headers=copy(jsonheader))
        conn_admin.request_delete(self.P2, headers=copy(jsonheader))


if __name__ == '__main__':

# Check if database is empty
    getextra = ""
    response = conn_none.request_get('resources/channels', headers=copy(jsonheader))
    assert '200' == response[u'headers']['status'], 'Database list request returned an error'
    j1 = JSONDecoder().decode(response[u'body'])
    if (None != j1[u'channels']):
        print "Database at " + base_url + " not empty."
        d = raw_input('Continue anyway? [y/N] ')
        if d != "y" and d != "Y":
            sys.exit(1)
        dbnonempty = True
        getextra = "?~name=C?"
    unittest.main()
Example No. 31
def main2():
    #go database
    dbs = MySQLdb.connect(host='mysql.ebi.ac.uk',user='******',passwd='amigo',db='go_latest',port=4085)
    cur = dbs.cursor()
    #reactome
    rwsdl = "http://www.reactome.org:8080/caBIOWebApp/services/caBIOService?wsdl"
    rserv = WSDL.Proxy(rwsdl)
    
    #kegg
    kegg_url = "http://rest.kegg.jp"
    conn = Connection(kegg_url)
    
    #uniprot taxonomy
    url = 'http://www.uniprot.org/taxonomy/'
    
    
    print '---'    
    annotationArray = []
    with open('annotations.dump','rb') as f:
        ar = pickle.load(f)
    for idx,element in enumerate(ar):
        print idx
        modelAnnotations = Counter()
        for index  in element:
            for annotation in element[index]:
                try:
                    bioArray = []
                    tAnnotation = annotation.replace('%3A',':')
                    tAnnotation = re.search(':([^:]+:[^:]+$)',tAnnotation).group(1)
                    
                    if 'GO' in annotation:            
                        cur.execute("SELECT * FROM term WHERE acc='{0}'".format(tAnnotation))
                        for row in cur.fetchall():
                            bioArray.append([row[1],row[3]])
                            modelAnnotations.update([row[1]])
                    elif 'reactome' in annotation:
                        tAnnotation2 = re.search('_([^_]+$)',tAnnotation).group(1)
                        try:
                            query = rserv.queryById(Types.longType(long(tAnnotation)))
                        except:
                            continue
                        bioArray.append([query['name'],tAnnotation])
                        modelAnnotations.update([query['name']])
                    elif 'kegg' in annotation:
                        if 'pathway' in tAnnotation:
                            tAnnotation2 = 'map' + re.search('[^0-9]+([0-9]+$)',tAnnotation).group(1)
                            reply = conn.request_get('find/pathway/{0}'.format(tAnnotation2), headers={'Accept':'text/json'})
                            if reply['body'] != '\n':
                                bioArray.append([reply['body'].split('\t')[1].strip(),tAnnotation])
                                modelAnnotations.update([reply['body'].split('\t')[1].strip()])
                        else:
                            print annotation
                        
                    elif 'uniprot' in annotation:
                        identifier = annotation.split(':')[-1]
                        url = 'http://www.uniprot.org/uniprot/{0}.tab'.format(identifier)
                        params = {}
                        data = urllib.urlencode(params)
                        request = urllib2.Request(url, data)
                        request.add_header('User-Agent', 'Python contact')
                        response = urllib2.urlopen(request)
                        page = response.read(200000)
                        proteinName = page.split('\n')[1].split('\t')[3]
                        modelAnnotations.update([proteinName])
                        
                    elif 'interpro' in annotation:
                        identifier = annotation.split(':')[-1]
                        url = 'http://www.ebi.ac.uk/interpro/entry/{0}'.format(identifier)
                        params = {}
                        data = urllib.urlencode(params)
                        request = urllib2.Request(url, data)
                        request.add_header('User-Agent', 'Python contact')
                        response = urllib2.urlopen(request)
                        page = response.read(200000)
                        pointer = page.index('h2 class="strapline"')
                        extract = page[pointer:pointer+100]
                        extract = extract[extract.index('>')+1:extract.index('<')]
                        modelAnnotations.update([extract])
    
                        
                    #elif 'taxonomy' in annotation:
                        #uniprot stuff for taxonomy
                    #    pass
                        '''
                        url = 'http://www.uniprot.org/taxonomy/'
                        params = {
                        'from':'ACC',
                        'to':'P_REFSEQ_AC',
                        'format':'tab',
                        'query':'P13368 P20806 Q9UM73 P97793 Q17192'
                        }
                        
                        data = urllib.urlencode(params)
                        request = urllib2.Request(url, data)
                        contact = "" # Please set your email address here to help us debug in case of problems.
                        request.add_header('User-Agent', 'Python contact')
                        response = urllib2.urlopen(request)
                        page = response.read(200000)
                        '''
                    else:
                        print '--',annotation,'GO' in tAnnotation
                except:
                    continue
        print modelAnnotations
        annotationArray.append(modelAnnotations)
    with open('parsedAnnotations.dump','wb') as f:
        pickle.dump(annotationArray,f)
Example No. 32
#!/usr/bin/env python

import yaml
import json
from restful_lib import Connection
from lib.Twitter import Twitter

with open('credentials.yml', 'r') as f:
    credentials = yaml.load(f)

friends = []
with Twitter(consumer_key=credentials["twitter"]["consumer_key"],
        consumer_secret=credentials["twitter"]["consumer_sec"],
        access_token=credentials["twitter"]["access_tok"],
        access_token_secret=credentials["twitter"]["access_sec"]) as t:
    friends = t.getList()

base_url = "https://keybase.io/_/api/1.0"
conn = Connection(base_url)

for friend in friends:
    resp = conn.request_get("/user/lookup.json", args={'twitter':friend, 'fields':'basics'}, headers={'Accept':'text/json'})
    js = json.loads(resp[u'body'])
    if len(js[u'them']) > 0:
        for entry in js[u'them']:
            print entry[u'basics'][u'username']

Example No. 33
class RedmineRESTAPIWrapper(object):

    def __init__(self, settings):
        self.api_key = settings.redmine_api_key
        self.conn = Connection(settings.redmine_url)
    
    def request_put(self, path, payload):
        return self.conn.request_put(path, args= [ ('key', self.api_key) ], body=json.dumps(payload), headers={'content-type':'application/json', 'accept':'application/json'})

    def request_post(self, path, payload):
        return self.conn.request_post(path, args= [ ('key', self.api_key) ], body=json.dumps(payload), headers={'content-type':'application/json', 'accept':'application/json'})

    def request_get(self, path, payload):
        return self.conn.request_get(path, args= [ ('key', self.api_key) ] + payload, headers={'content-type':'application/json', 'accept':'application/json'})

    def put_items_with_payload(self, url, payload_name, items, payload):
        if not isinstance(items, set):
            items = set([items])
        for i in items:
            resp = self.request_put("/{}/".format(url)+str(i)+".json", { payload_name: payload})
            status = resp[u'headers']['status']
            print 'Item {} '.format(url), i, ', http status code: ', status
            if int(status) != 200:
                print resp

    def put_issues_with_payload(self, issues, payload):
        return self.put_items_with_payload("issues", "issue", issues, payload)

    def put_versions_with_payload(self, versions, payload):
        return self.put_items_with_payload("versions", "version", versions, payload)

    def post_time_entries_with_payload(self, payload):
        resp = self.request_post("/time_entries.json", {'time_entry': payload})
        status = resp[u'headers']['status']
        print 'Issue ', payload['issue_id'], ', http status code: ', status

    def get_items_as_json(self, endpoint, payload):
        resp = self.request_get("/"+endpoint+".json", payload)
        status = resp[u'headers']['status']
        return resp[u'body']

    def get_time_entries(self, payload):
        return self.get_items_as_json('time_entries', payload)

    def get_projects(self, payload):
        return self.get_items_as_json('projects', payload)

    def set_projects_parent(self, projects, parent_id):
        return self.put_items_with_payload("projects", "project", projects, { 'parent_id': parent_id})

    def add_issues_to_milestone(self, issues, version_id, milestone_name):
        self.put_issues_with_payload(issues, {'notes': 'Issue added to milestone: '+milestone_name, 'fixed_version_id': version_id})

    def add_issues_on_sprint(self, issues, sprint_id, sprint_name):
        self.put_issues_with_payload(issues, {'notes': 'Issue added to sprint "'+sprint_name+'" from REST API', 'easy_sprint_id': sprint_id})
                                         
    def set_issues_status(self, issues, status_id):
        self.put_issues_with_payload(issues, {'status_id': status_id})

    def set_issues_status_and_assigned(self, issues, status_id, assigned_id):
        self.put_issues_with_payload(issues, {'status_id': status_id, 'assigned_to_id': assigned_id})

    def set_issues_assigned(self, issues, assigned_id):
        self.put_issues_with_payload(issues, {'assigned_to_id': assigned_id})

    def set_parent_issue(self, issues, parent_id):
        self.put_issues_with_payload(issues,{'parent_issue_id': parent_id})

    def add_notes_on_issues(self, issues, notes):
        self.put_issues_with_payload(issues,{'notes': notes})

    def add_update_on_commit(self, issue, repo_name, branch_name, commit_hash, commit_msg):
        notes = "Repo <b>%s</b> branch <b>%s</b> commit <b>%s</b>: %s" % (repo_name, branch_name, commit_hash, commit_msg)
        return self.add_notes_on_issues(set([issue]), notes)

    def add_update_on_commit_from_line(self, line, repo_name, branch_name):
        (commit_hash, commit_msg) =line.split(' ',1)
        match = re.search("\#(\d+)", commit_msg)
        if match:
            issue = match.group(1)
            resp =self.add_update_on_commit(issue,repo_name, branch_name, commit_hash, commit_msg)

    def add_issues_to_milestone_1(self, issues):
        self.add_issues_to_milestone(issues, 61, "Milestone 1")

    def close_issues(self, issues):
        self.set_issues_status(issues, settings.statuses['closed'])

    def get_items_as_json_full(self, endpoint, params = None, process_cb = None):
        (offset, limit, read, total) = (0, 25, 0, 65535)
        if params is None: params = []
        result = []
        while read<total:
            _params = params + [('limit', limit), ('offset', offset)]
            resp = json.loads(self.get_items_as_json(endpoint, _params))
#            add_to_list(resp["time_entries"], label)
            result += resp[endpoint]
            if process_cb is not None:
                process_cb(resp[endpoint])

            total = resp["total_count"]
            read += limit if (limit+offset < total) else total - offset
            offset += limit
        return result
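
A usage sketch for the wrapper above; the settings stand-in, issue ids and status id are placeholders chosen for illustration:

class _Settings(object):
    # Stand-in for the real settings object the constructor expects.
    redmine_url = 'https://redmine.example.com'
    redmine_api_key = 'YOUR_API_KEY'

redmine = RedmineRESTAPIWrapper(_Settings())
redmine.set_issues_status({101, 102}, 5)          # e.g. move two issues to status id 5
entries = redmine.get_items_as_json_full('time_entries')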
Example No. 34
class Store():
    def __init__(self, base_store_url, username=None, password=None):
        """ Base URL for the store should be pretty self-explanatory. E.g. something like
            "http://api.talis.com/stores/store_name"
            Only needs to enter the username/password if this class is going to tinker
            with things."""
        if base_store_url.endswith('/'):
            base_store_url = base_store_url[:-1]

        self.base_store_url = base_store_url
        # Split the given URL
        if base_store_url:
            self.conn = Connection(base_store_url, username=username, password=password)

    def does_snapshot_exist(self, snapshot_filename):
        # Test to see if snapshot exists:
        snapshot_path = SNAPSHOT_TEMPLATE % snapshot_filename
        
        response = self.conn.request(snapshot_path, method = "HEAD")
        
        if response.get('headers') and response.get('headers').get('status'):
            status = response.get('headers').get('status')
        
            if status in ['200', '204']:
                return True
            elif status.startswith('4'):
                return False
            # else: raise Error?

        return False

    def schedule_reset_data(self, label, at_time=None):
        """Will request that the store is emptied, and label the request. 
           If a time is given as an ISO8601 formatted string, this will be 
           the scheduled time for the snapshot. Otherwise, it will use the current time."""
        if not at_time:
            at_time=datetime.utcnow().isoformat().split('.')[0]
        
        snapshot_request = RESET_STORE_TEMPLATE % (label, at_time)
        
        return self.conn.request_post(JOB_REQUESTS, body = snapshot_request, headers={'Content-Type':'application/rdf+xml'})

    def schedule_snapshot_data(self, label, at_time=None):
        """Will request a snapshot be made of the store. 
           If a time is given as an ISO8601 formatted string, this will be 
           the scheduled time for the snapshot. Otherwise, it will use the current time."""
        if not at_time:
            at_time=datetime.utcnow().isoformat().split('.')[0]
        
        snapshot_request = SNAPSHOT_STORE_TEMPLATE % (label, at_time)
        
        return self.conn.request_post(JOB_REQUESTS, body = snapshot_request, headers={'Content-Type':'application/rdf+xml'})
        
    def schedule_snapshot_restore(self, label, snapshot_filename, at_time=None):
        """Will request that the store is restored from a snapshot. If a time is given as
           an ISO8601 formatted string, this will be the scheduled time for
           the recovery. Otherwise, it will use the current time."""
        if not at_time:
            at_time=datetime.utcnow().isoformat().split('.')[0]
        
        # Test to see if snapshot exists:
        snapshot_path = SNAPSHOT_TEMPLATE % snapshot_filename
        
        if self.does_snapshot_exist(snapshot_filename):
            snapshot_uri = "%s%s" % (self.base_store_url, snapshot_path)
            snapshot_request = SNAPSHOT_RESTORE_TEMPLATE % (label, snapshot_uri, at_time)    
            return self.conn.request_post(JOB_REQUESTS, body = snapshot_request, headers={'Content-Type':'application/rdf+xml'})
            
    def submit_rdfxml(self, rdf_text):
        """Puts the given RDF/XML into the Talis Store"""
        return self._put_rdf(rdf_text, mimetype="application/rdf+xml")
        
    def _put_rdf(self, rdf_text, mimetype="application/rdf+xml"):
        """Placeholder for allowing other serialisation types to be put into a
           Talis store, whether the conversion takes place here, or if the Talis
           store starts to accept other formats."""
        if rdf_text:
            request_headers = {}
            if mimetype not in ['application/rdf+xml']:
                raise RDFFormatException("%s is not an allowed RDF serialisation format" % mimetype)
            request_headers['Content-Type'] = mimetype
            return self.conn.request_post(META_ENDPOINT, body=rdf_text, headers=request_headers)        
                 
    def _query_sparql_service(self, query, args={}):
        """Low-level SPARQL query - returns the message and response headers from the server.
           You may be looking for Store.sparql instead of this."""
        passed_args = {'query':query}
        passed_args.update(args)
        return self.conn.request_get(SPARQL_ENDPOINT, args=passed_args, headers={'Content-type':'application/x-www-form-urlencoded'})
        
    def _query_search_service(self, query, args={}):
        """Low-level content box query - returns the message and response headers from the server.
           You may be looking for Store.search instead of this."""
           
        passed_args = {'query':query}
        passed_args.update(args)
        
        return self.conn.request_get(CONTENT_ENDPOINT, args=passed_args, headers={'Content-type':'application/x-www-form-urlencoded'} )
        
    def _list_snapshots(self, passed_args={}):
        return self.conn.request_get(SNAPSHOTS, args=passed_args, headers={}) 
        
##############################################################################
# Convenience Functions
##############################################################################

    def submit_rdfxml_from_url(self, url_to_file, headers={"Accept":"application/rdf+xml"}):
        """Convenience method - downloads the file from a given url, and then pushes that
           into the meta store. Currently, it doesn't put it through a parse-> reserialise
           step, so that it could handle more than rdf/xml on the way it but it is a
           future possibility."""
        import_rdf_connection = Connection(url_to_file)
        response = import_rdf_connection.request_get("", headers=headers)
        
        if response.get('headers') and response.get('headers').get('status') in ['200', '204']:
            request_headers = {}
            
            # Lowercase all response header fields, to make matching easier. 
            # According to HTTP spec, they should be case-insensitive
            response_headers = response['headers']
            for header in response_headers:
                response_headers[header.lower()] = response_headers[header]
                
            # Set the body content
            body = response.get('body').encode('UTF-8')
            
            # Get the response mimetype
            rdf_type = response_headers.get('content-type', None)
            
            return self._put_rdf(body, mimetype=rdf_type)
            
    def sparql(self, query, args={}):
        """Performs a SPARQL query and simply returns the body of the response if successful
           - if there is an issue, such as a code 404 or 500, this method will return False. 
           
           Use the _query_sparql_service method to get hold of
           the complete response in this case."""
        response = self._query_sparql_service(query, args)
        headers = response.get('headers')
        
        status = headers.get('status', headers.get('Status'))
        
        if status in ['200', 200, '204', 204]:
            return response.get('body').encode('UTF-8')
        else:
            return False

    def search(self, query, args={}):
        """Performs a search query and simply returns the body of the response if successful
           - if there is an issue, such as a code 404 or 500, this method will return False. 
           
           Use the _query_search_service method to get hold of
           the complete response in this case."""
        response = self._query_search_service(query, args)
        headers = response.get('headers')
        
        status = headers.get('status', headers.get('Status'))
        
        if status in ['200', 200, '204', 204]:
            parsed_atom = Atom_Search_Results(response.get('body').encode('UTF-8'))
            return parsed_atom.get_item_list()
        else:
            return False
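
A brief usage sketch of the Store class; the store URL follows the pattern given in its docstring and the query is illustrative:

store = Store("http://api.talis.com/stores/store_name",
              username="user", password="secret")
result = store.sparql("SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 10")
if result is False:
    print("SPARQL query failed")
else:
    print(result)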
Example No. 35
#!/usr/bin/python

import csv
import simplejson as json
from datetime import datetime
from restful_lib import Connection
#conn = Connection("http://mc933.lab.ic.unicamp.br:8010")
conn = Connection("http://mc933.lab.ic.unicamp.br:8010/getBusesPositions")
#response = conn.request_get("/getPosition")
response = conn.request_get("")

buses = json.loads(response["body"])

for i in buses:
    response = conn.request_get(str(i))
    obj = json.loads(response["body"])
    writer = csv.writer(
        open('points.csv',
             'ab'))  #, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL
    #writer.writerow(['Horario', 'Latitude', 'Longitude'])
    # Assumption: the returned position object carries 'lat'/'lon' fields
    # (the field names are a guess).
    lat, lon = obj.get('lat'), obj.get('lon')
    writer.writerow([
        datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S"),
        str(lat),
        str(lon)
    ])
    print "Point (" + str(lat) + ',' + str(lon) + ") saved at " + \
        datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M:%S")
    #print response["body"] + "\n"
#coordenada = json.loads(response["body"])

conn.request_put("/sidewinder", {'color': 'blue'},
Example No. 36
class Account(object):
    """A vcast account.
    
    This class handles communication with Vcast FAUCET PVR Server
    """

    def __init__(self, username, password):
        """Set up a REST connection to Vcast Server
        
        Returns id_usr user id or raises exception"""
        self.username = username
        self.password = password

        url = 'http://www.vcast.it/faucetpvr/api/1.0/server_rest.php'
        self.connection = Connection(url)
        self.connection.add_rest_credentials(username, password)
        
        c = self.connection.request_get('/faucetid')
        if c['body'] == 'Access Denied':
            raise Exception('Wrong credentials')
        self.id_usr = simplejson.loads(c['body'])['id_usr']

    def get_channels(self):
        """Return channels.
        
        The function returns channel as a list of dictionaries.
        Each element of the list is a dictionary with two keys,
        - type, whose value is a string that can be "video" or "audio";
        - name, whose value is a string.

        """
        try:
            reply = self.connection.request_get('/channels')
            return simplejson.loads(reply['body'])
        except:
            print reply

    def get_recordings(self):
        """Return recordings.

        The function returns recordings as a list of dictionaries.
        Each element has the following keys whose value is a unicode
        string (type: unicode).
        - id_rec
        - title
        - channel
        - channel_type (can be 'audio' or 'video')
        - format
        - from_time
        - rec_time
        - to_time
        - retention
        - repeat
        - faucetacc (ignore it)

        """
        try:
            reply =  self.connection.request_get('/recordings')
            return simplejson.loads(reply['body'])
        except:
            print reply

    def get_download_urls(self):
        """Return the urls of avaible recordings"""
        feed = self.connection.request_get('/feed')['body']
        feed = re.sub(r'\\(.)', r'\1', feed)[13:-3]
        f = feedparser.parse(feed)
        urls = []
        for i in f.entries:
            # print i['enclosures'][0]['href']
            urls.append(i['enclosures'][0]['href'])
        return urls

    def new_recording(self, recording):
        """Invia al server una nuova programmazione"""
        json = recording.toJson()
        print json
        a = self.connection.request_post('/recordings', body=json)
        print a

    def delete_recording(self, id):
        repl = self.connection.request_get('/delete_recording',
                args={'id_rec':str(id)})
        print repl
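
A minimal usage sketch; the credentials are placeholders and the calls simply exercise the methods defined above:

account = Account("user", "secret")      # raises if the Vcast server rejects the login
print(account.get_channels())
for url in account.get_download_urls():
    print(url)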
Example No. 37
def trans():
    base_url = "https://partunlimited.demo.triggermesh.io:8080/api"  # The API endpoint for the parts store
    conn = Connection(base_url)

    ce = request.get_json(force=True)
    print(request.data)
    print(request.headers)
    ceSource = request.headers['Ce-Source']

    headers = {}
    headers['Ce-Specversion'] = '1.0'
    headers['Ce-Time'] = request.headers['Ce-Time']
    headers['Ce-Id'] = request.headers['Ce-Id']
    headers[
        'Ce-Source'] = 'translators.triggermesh.io/partsunlimited-demo-translator'

    # For events we don't care about, just return
    if ceSource is not None and not ceSource.startswith(
            'tmtestdb.demo.triggermesh.com/'):
        print("invalid source: " + ceSource)
        return sink()

    # Handle the replenishment event by posting a message to Zendesk
    if ceSource == "tmtestdb.demo.triggermesh.com/replenish":
        headers['Ce-Type'] = 'com.zendesk.ticket.create'
        # Need to extract the manufacturer details
        resp = conn.request_get("/product/" + str(ce["new"]["ID"]))
        respBody = json.loads(resp[u'body'])

        if ce["op"] == "UPDATE" and ce["new"]["QUANTITY"] == 1:
            body = {
                "subject":
                "Parts Unlimited Replenishment Request",
                "body":
                "It is time to reorder " + respBody["name"] + " from " +
                respBody["manufacturer"]["manufacturer"]
            }

            return app.response_class(response=json.dumps(body),
                                      headers=headers,
                                      status=200,
                                      mimetype='application/json')
        else:
            print("invalid replenish")
            return sink()

    # Handle the new order event by sending it to an Oracle Cloud function
    if ceSource == "tmtestdb.demo.triggermesh.com/neworder":
        headers['Ce-Type'] = 'com.triggermesh.targets.oracle.function.partsunlimited-neworder'
        # Need to extract the order details
        resp = conn.request_get("/order/" + str(ce["new"]["ID"]))
        respBody = json.loads(resp[u'body'])

        if ce["op"] == "INSERT":
            body = {
                "name": respBody["user"]["name"],
                "address": respBody["user"]["address"],
                "totalCost": respBody["totalCost"],
                "paymentMethod": respBody["paymentType"],
                "ordered": respBody["dateOrdered"]
            }

            return app.response_class(response=json.dumps(body),
                                      headers=headers,
                                      status=200,
                                      mimetype='application/json')
        else:
            print("invalid neworder")
            return sink()

    else:
        print("unknown source" + ceSource)
        return sink()
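
A hedged sketch of exercising trans() locally with Flask's test client, assuming the surrounding module exposes the Flask app (and the sink() helper) with trans() bound to POST on '/'; the route, event payload, IDs and timestamps are illustrative assumptions, not part of the original example.

import json as _json

def _send_test_event():
    # Simulated CloudEvent from the replenishment source; values are made up.
    event = {"op": "UPDATE", "new": {"ID": 1, "QUANTITY": 1}}
    headers = {
        'Ce-Specversion': '1.0',
        'Ce-Time': '2020-01-01T00:00:00Z',
        'Ce-Id': 'test-event-1',
        'Ce-Source': 'tmtestdb.demo.triggermesh.com/replenish',
        'Content-Type': 'application/json',
    }
    with app.test_client() as client:
        resp = client.post('/', data=_json.dumps(event), headers=headers)
        print(resp.status_code, resp.get_json())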
Exemplo n.º 38
0
class Client(object):

    version = "0.002001"

    def __init__(self, server, search="/search", index="/index", debug=False, username=False, password=False):

        self.server = server
        self.search = search
        self.index = index
        self.debug = debug
        self.un = username
        self.pw = password

        # docs:
        # http://code.google.com/p/python-rest-client/wiki/Using_Connection
        self.ua = Connection(server)

        # interrogate server
        resp = self.ua.request_get("/")
        # pprint.pprint(resp)
        paths = json.loads(resp["body"])
        self.searcher = Connection(paths["search"])
        self.indexer = Connection(paths["index"], username=username, password=password)
        self.commit_uri = paths["commit"]
        self.rollback_uri = paths["rollback"]
        self.fields = paths["fields"]
        self.facets = paths["facets"]

    def __str__(self):
        return unicode(self).encode("utf-8")

    def __unicode__(self):
        return pprint.pformat(vars(self))

    def _put_doc(self, doc, uri=None, content_type=None):
        # print "adding to index: %s" % doc

        body_buf = ""

        if isinstance(doc, Doc):
            # print "doc isa Dezi.Doc\n"
            body_buf = doc.as_string()
            if uri == None:
                uri = doc.uri
            if content_type == None:
                content_type = doc.mime_type

        elif os.path.isfile(doc):
            f = open(doc, "r")
            body_buf = f.read()
            if uri == None:
                uri = doc

        else:
            # print "doc isa string\n"
            body_buf = doc
            if uri == None:
                raise Exception("uri required")

        server_uri = "/" + uri
        if self.debug:
            print("uri=" + server_uri)
            print("body=%s" % body_buf)

        resp = self.indexer.request_post(server_uri, body=body_buf, headers={"Content-Type": content_type})

        # pprint.pprint(resp)
        return Dezi.Response(resp)

    def add(self, doc, uri=None, content_type=None):
        return self._put_doc(doc, uri, content_type)

    def update(self, doc, uri=None, content_type=None):
        return self._put_doc(doc, uri, content_type)

    def delete(self, uri):
        resp = self.indexer.request_delete(uri)
        return Dezi.Response(resp)

    def commit(self):
        ua = Connection(self.commit_uri, username=self.un, password=self.pw)
        resp = ua.request_post("/")
        return Dezi.Response(resp)

    def rollback(self):
        ua = Connection(self.rollback_uri, username=self.un, password=self.pw)
        resp = ua.request_post("/")
        return Dezi.Response(resp)

    def get(self, **my_args):
        if "q" not in my_args:
            raise Exception("'q' param required")

        resp = self.searcher.request_get("/", args=my_args)
        # pprint.pprint(resp)
        r = Dezi.Response(resp)
        if r.is_success == False:
            self.last_response = r
            return False
        else:
            return r
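
A short usage sketch for the Client class above; the server URL, document body and query are placeholders for a hypothetical Dezi server.

if __name__ == '__main__':
    client = Client('http://localhost:5000')          # hypothetical Dezi server
    # Index a small XML document, commit it, then search for it.
    resp = client.add('<doc><title>hello</title></doc>',
                      uri='test/doc1',
                      content_type='application/xml')
    client.commit()
    results = client.get(q='hello')
    if results:
        print(results)
    else:
        print(client.last_response)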
Exemplo n.º 39
0
#!/usr/bin/python

import time
import csv
import simplejson as json
from restful_lib import Connection
#conn = Connection("http://mc933.lab.ic.unicamp.br:8010")
while 1:
    #mc933.lab.ic.unicamp.br
    #response = conn.request_get("/getPosition")
    try:
        conn = Connection("http://mc933.lab.ic.unicamp.br/getBusesPositions")
        response = conn.request_get("")
    except Exception:
        print "Connection failed: waiting 10 seconds"
        time.sleep(10)
        continue
    
    buses = json.loads(response["body"])
    try:
        f = open('positions.csv')
        read = csv.reader(f)
        # for row in read:
        #     txt += str(row) + "</br>"
        # return txt
        # 0 - systemDatetime
        # 1 - moduleDatetime
        # 2 - licensePlate
        # 3 - latitude
        # 4 - longitude
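
A hedged sketch of how rows matching the column layout above might be appended to positions.csv; the key names on each bus record are not shown in the excerpt and are placeholders only, as is the use of the local clock for systemDatetime.

import datetime

def append_positions(buses, path='positions.csv'):
    # Placeholder field names ('moduleDatetime', 'licensePlate', ...);
    # adjust to whatever the service actually returns.
    with open(path, 'ab') as out:
        writer = csv.writer(out)
        for bus in buses:
            writer.writerow([datetime.datetime.now().isoformat(),
                             bus.get('moduleDatetime'),
                             bus.get('licensePlate'),
                             bus.get('latitude'),
                             bus.get('longitude')])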
Exemplo n.º 40
0
class CTCTConnection:
	API_BASE_URL = "https://api.constantcontact.com/ws/customers/"
	DO_NOT_INCLUDE_LISTS = ['Removed', 'Do Not Mail', 'Active']

	EVENTS_BOUNCES = 0
	EVENTS_CLICKS = 1
	EVENTS_FORWARDS = 2
	EVENTS_OPENS = 3
	EVENTS_OPT_OUTS = 4
	EVENTS_SENDS = 5

	CAMPAIGNS_ALL = 0
	CAMPAIGNS_DRAFT = 1
	CAMPAIGNS_RUNNING = 2
	CAMPAIGNS_SENT = 3
	CAMPAIGNS_SCHEDULED = 4

	#atom namespace
	NS_ATOM = 'http://www.w3.org/2005/Atom'

	#constantcontact namespace
	NS_CTCT = 'http://ws.constantcontact.com/ns/1.0/'

	def __init__(self, api_key, username, password):
		self.username = username
		login_username = api_key + "%" + username

		connection_base = CTCTConnection.API_BASE_URL + username + "/"

		self.connection = Connection(connection_base, username=login_username, password=password)

	def verify_credentials(self):
		""" Returns whether or not the apikey, username and password the object was initialized are valid """
		response = self.connection.request_get("/")

		# Web service returns a 200 status code if successful
		if(int(response['headers']['status']) == 200):
			return True                                  #valid username, valid credentials
		else:
			if re.match('^[\w.@-]{6,}$', self.username): #does username only contain allowable characters? valid are a-zA-Z0-9.-_@
				return False                             #valid username, invalid credentials
			else:
				raise InvalidUsernameException('This user name contains characters that are no '
											   'longer supported. Please log in to '
											   'constantcontact.com and update your user name.')

	def get_contact_lists(self, listName = None, path=None):
		""" Returns all of the Contact Lists from ConstantContact.
			No parameters are necessary """
		contact_lists = []

		# Default path
		if(path == None):
			path = '/lists'

		response = self.connection.request_get(path)

		# If the status code isn't 200, we have a problem so just return None
		if(int(response['headers']['status']) != 200):
			return None

		# Build an XML Tree from the return
		#xml = ET.fromstring(response['body'])
		xml = ET.fromstring(response['body'].encode('ascii','xmlcharrefreplace'))

		# Check if there is a next link
		links = xml.findall('{http://www.w3.org/2005/Atom}link')
		next_path = None
		for link in links:
			if(link.get('rel') == 'next'):
				next_link = link.get('href')
				slash = next_link.find('/lists')
				next_path = next_link[slash:]
				break

		# Get all of the contact lists
		entries = xml.findall('{http://www.w3.org/2005/Atom}entry')
		for entry in entries:
			contact_list = {'id': entry.findtext('{http://www.w3.org/2005/Atom}id'),
							'name': entry.findtext('{http://www.w3.org/2005/Atom}content/'
												   '{http://ws.constantcontact.com/ns/1.0/}ContactList/'
												   '{http://ws.constantcontact.com/ns/1.0/}Name'),
							'updated': entry.findtext('{http://www.w3.org/2005/Atom}updated')}

			# Don't include some lists
			if(contact_list['name'] not in CTCTConnection.DO_NOT_INCLUDE_LISTS):
				contact_lists.append(contact_list)

		# If there is a next link, recursively retrieve from there too
		if(next_path != None):
			contact_lists.extend(self.get_contact_lists(path=next_path))

		if(listName != None):
			return_list = []
			for contact_list in contact_lists:
				if (contact_list['name'] == listName):
					return_list.append(contact_list)
			return return_list

		return contact_lists

	def get_contact_list_id(self, listName):
		try:
			listEntry = self.get_contact_lists(listName)
			return listEntry[0]["id"]
		except Exception, e:
			logging.error(e.message)
			return False
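
A brief usage sketch for CTCTConnection; the API key, username and password are placeholders.

if __name__ == '__main__':
    ctct = CTCTConnection('my-api-key', 'user@example.com', 'secret')
    if ctct.verify_credentials():
        # List the contact lists that are not filtered out by DO_NOT_INCLUDE_LISTS.
        for contact_list in ctct.get_contact_lists() or []:
            print contact_list['name'], contact_list['id']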
Exemplo n.º 41
0
# This is a small example of how to make a PUT request (see http://microformats.org/wiki/rest/urls)
# to an API written with tastypie (https://github.com/toastdriven/django-tastypie)

# import the standard JSON parser
import json
# import the REST library (from http://code.google.com/p/python-rest-client/)
from restful_lib import Connection

#site url
base_url = "http://IP/api/v1"
conn = Connection(base_url)

#get the global config dict
globalconfig = conn.request_get('/globalconfig/')
globalconfig = json.loads(globalconfig["body"])
globalconfig = globalconfig["objects"][0]

#set the args
globalconfig["sfftrim"] = True

#make the put REST request to update the resource
result = conn.request_put('/globalconfig/1/',
                          body=json.dumps(globalconfig),
                          headers={'content-type': 'application/json'})
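
A small follow-up sketch (not part of the original example): check the status of the PUT and re-fetch the resource to confirm the change; the accepted status codes are an assumption about how the tastypie endpoint responds.

status = result['headers']['status']
if status in ('200', '202', '204'):
    # Re-read the resource to confirm the flag was updated.
    check = conn.request_get('/globalconfig/1/')
    print(json.loads(check['body'])['sfftrim'])
else:
    print('PUT failed with status %s' % status)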
Exemplo n.º 42
0
import re
import socket
import sys

from restful_lib import Connection

url_re = re.compile(
    r'^https?://' # http:// or https://
    r'(?:(?:[A-Z0-9-]+\.)+[A-Z]{2,6}|' #domain...
    r'localhost|' #localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
    r'(?::\d+)?' # optional port
    r'(?:/?|/\S+)$', re.IGNORECASE)

# check we have a valid url
if not url_re.match(sys.argv[1]):
    print "You must provide a valid URL"
    sys.exit(1)

# print a message as it may take time for the response to return
print "Connecting to %s" % sys.argv[1]

# make the request
try:
    conn = Connection(sys.argv[1])
    # fetch the root resource so its body can be printed below
    response = conn.request_get("/")
except socket.error:
    print "We couldn't connect to %s" % sys.argv[1]
    sys.exit(1)

if response['headers'].status == 200:
    print response['body']
else:
    print "Response was %s" % response['headers'].status
Exemplo n.º 43
0
#!/usr/bin/python

from restful_lib import Connection
conn = Connection("http://localhost:8888")
response = conn.request_get("/getNearestBusStops?lat=-22.8177;lon=-47.0683")

print response['body']

conn.request_put("/sidewinder", {'color': 'blue'}, headers={'content-type':'application/json', 'accept':'application/json'})
Exemplo n.º 44
0
class Store():
    def __init__(self, base_store_url, username=None, password=None):
        """ Base URL for the store should be pretty self-explanatory. E.g. something like
            "http://api.talis.com/stores/store_name"
            Only needs to enter the username/password if this class is going to tinker
            with things."""
        if base_store_url.endswith('/'):
            base_store_url = base_store_url[:-1]

        self.base_store_url = base_store_url
        # Open a REST connection to the store
        if base_store_url:
            self.conn = Connection(base_store_url,
                                   username=username,
                                   password=password)

    def does_snapshot_exist(self, snapshot_filename):
        # Test to see if snapshot exists:
        snapshot_path = SNAPSHOT_TEMPLATE % snapshot_filename

        response = self.conn.request(snapshot_path, method="HEAD")

        if response.get('headers') and response.get('headers').get('status'):
            status = response.get('headers').get('status')

            if status in ['200', '204']:
                return True
            elif status.startswith('4'):
                return False
            # else: raise Error?

        return False

    def schedule_reset_data(self, label, at_time=None):
        """Will request that the store is emptied, and label the request. 
           If a time is given as an ISO8601 formatted string, this will be 
           the scheduled time for the snapshot. Otherwise, it will use the current time."""
        if not at_time:
            at_time = datetime.utcnow().isoformat().split('.')[0]

        snapshot_request = RESET_STORE_TEMPLATE % (label, at_time)

        return self.conn.request_post(
            JOB_REQUESTS,
            body=snapshot_request,
            headers={'Content-Type': 'application/rdf+xml'})

    def schedule_snapshot_data(self, label, at_time=None):
        """Will request a snapshot be made of the store. 
           If a time is given as an ISO8601 formatted string, this will be 
           the scheduled time for the snapshot. Otherwise, it will use the current time."""
        if not at_time:
            at_time = datetime.utcnow().isoformat().split('.')[0]

        snapshot_request = SNAPSHOT_STORE_TEMPLATE % (label, at_time)

        return self.conn.request_post(
            JOB_REQUESTS,
            body=snapshot_request,
            headers={'Content-Type': 'application/rdf+xml'})

    def schedule_snapshot_restore(self,
                                  label,
                                  snapshot_filename,
                                  at_time=None):
        """Will request that the store is restored from a snapshot. If a time is given as
           an ISO8601 formatted string, this will be the scheduled time for
           the recovery. Otherwise, it will use the current time."""
        if not at_time:
            at_time = datetime.utcnow().isoformat().split('.')[0]

        # Test to see if snapshot exists:
        snapshot_path = SNAPSHOT_TEMPLATE % snapshot_filename

        if self.does_snapshot_exist(snapshot_filename):
            snapshot_uri = "%s%s" % (self.base_store_url, snapshot_path)
            snapshot_request = SNAPSHOT_RESTORE_TEMPLATE % (
                label, snapshot_uri, at_time)
            return self.conn.request_post(
                JOB_REQUESTS,
                body=snapshot_request,
                headers={'Content-Type': 'application/rdf+xml'})

    def submit_rdfxml(self, rdf_text):
        """Puts the given RDF/XML into the Talis Store"""
        return self._put_rdf(rdf_text, mimetype="application/rdf+xml")

    def _put_rdf(self, rdf_text, mimetype="application/rdf+xml"):
        """Placeholder for allowing other serialisation types to be put into a
           Talis store, whether the conversion takes place here, or if the Talis
           store starts to accept other formats."""
        if rdf_text:
            request_headers = {}
            if mimetype not in ['application/rdf+xml']:
                raise RDFFormatException(
                    "%s is not an allowed RDF serialisation format" % mimetype)
            request_headers['Content-Type'] = mimetype
            return self.conn.request_post(META_ENDPOINT,
                                          body=rdf_text,
                                          headers=request_headers)

    def _query_sparql_service(self, query, args={}):
        """Low-level SPARQL query - returns the message and response headers from the server.
           You may be looking for Store.sparql instead of this."""
        passed_args = {'query': query}
        passed_args.update(args)
        return self.conn.request_get(
            SPARQL_ENDPOINT,
            args=passed_args,
            headers={'Content-type': 'application/x-www-form-urlencoded'})

    def _query_search_service(self, query, args={}):
        """Low-level content box query - returns the message and response headers from the server.
           You may be looking for Store.search instead of this."""

        passed_args = {'query': query}
        passed_args.update(args)

        return self.conn.request_get(
            CONTENT_ENDPOINT,
            args=passed_args,
            headers={'Content-type': 'application/x-www-form-urlencoded'})

    def _list_snapshots(self, passed_args={}):
        return self.conn.request_get(SNAPSHOTS, args=passed_args, headers={})


##############################################################################
# Convenience Functions
##############################################################################

    def submit_rdfxml_from_url(self,
                               url_to_file,
                               headers={"Accept": "application/rdf+xml"}):
        """Convenience method - downloads the file from a given url, and then pushes that
           into the meta store. Currently, it doesn't put it through a parse-> reserialise
           step, so that it could handle more than rdf/xml on the way it but it is a
           future possibility."""
        import_rdf_connection = Connection(url_to_file)
        response = import_rdf_connection.request_get("", headers=headers)

        if response.get('headers') and response.get('headers').get(
                'status') in ['200', '204']:
            request_headers = {}

            # Lowercase all response header fields, to make matching easier.
            # According to HTTP spec, they should be case-insensitive
            response_headers = response['headers']
            for header in response_headers:
                response_headers[header.lower()] = response_headers[header]

            # Set the body content
            body = response.get('body').encode('UTF-8')

            # Get the response mimetype
            rdf_type = response_headers.get('content-type', None)

            return self._put_rdf(body, mimetype=rdf_type)

    def sparql(self, query, args={}):
        """Performs a SPARQL query and simply returns the body of the response if successful
           - if there is an issue, such as a code 404 or 500, this method will return False. 
           
           Use the _query_sparql_service method to get hold of
           the complete response in this case."""
        response = self._query_sparql_service(query, args)
        headers = response.get('headers')

        status = headers.get('status', headers.get('Status'))

        if status in ['200', 200, '204', 204]:
            return response.get('body').encode('UTF-8')
        else:
            return False

    def search(self, query, args={}):
        """Performs a search query and simply returns the body of the response if successful
           - if there is an issue, such as a code 404 or 500, this method will return False. 
           
           Use the _query_search_service method to get hold of
           the complete response in this case."""
        response = self._query_search_service(query, args)
        headers = response.get('headers')

        status = headers.get('status', headers.get('Status'))

        if status in ['200', 200, '204', 204]:
            parsed_atom = Atom_Search_Results(
                response.get('body').encode('UTF-8'))
            return parsed_atom.get_item_list()
        else:
            return False
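
A minimal usage sketch for the Store class above; the store URL, credentials and query are placeholders.

if __name__ == '__main__':
    store = Store('http://api.talis.com/stores/store_name',
                  username='user', password='pass')
    # Run a simple SPARQL query; sparql() returns the response body or False.
    body = store.sparql('SELECT * WHERE { ?s ?p ?o } LIMIT 10')
    if body:
        print(body)
    else:
        print('SPARQL query failed')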
Exemplo n.º 45
0
class AdflyApiExample():
    #FROM EXAMPLE
    ##    BASE_HOST = 'https://api.adf.ly'
    ##    # TODO: Replace this with your secret key.
    ##    SECRET_KEY = '4c8fa05a-d826-4c06-86e4-59b86bf4868c'
    ##    # TODO: Replace this with your public key.
    ##    PUBLIC_KEY = '2ba3f6ce601d043c177eb2a83eb34f5f'
    ##    # TODO: Replace this with your user id.
    ##    USER_ID = 2
    ##    AUTH_TYPE = dict(basic=1, hmac=2)

    BASE_HOST = 'https://api.adf.ly'
    SECRET_KEY = 'YOUR SECRET KEY'
    PUBLIC_KEY = 'YOUR PUBLIC KEY'
    USER_ID = 'YOUR USER ID'
    AUTH_TYPE = dict(basic=1, hmac=2)

    def __init__(self):
        # In this example we use rest client provided by
        # http://code.google.com/p/python-rest-client/
        # Of course you are free to use any other client.
        self._connection = Connection(self.BASE_HOST)

    def get_groups(self, page=1):
        response = self._connection.request_get('/v1/urlGroups',
                                                args=self._get_params(
                                                    dict(_page=page),
                                                    self.AUTH_TYPE['hmac']))
        return json.loads(response['body'])

    def expand(self, urls, hashes=[]):
        params = dict()

        if type(urls) == list:
            for i, url in enumerate(urls):
                params['url[%d]' % i] = url
        elif type(urls) == str:
            params['url'] = urls

        if type(hashes) == list:
            for i, hashval in enumerate(hashes):
                params['hash[%d]' % i] = hashval
        elif type(hashes) == str:
            params['hash'] = hashes

        response = self._connection.request_get('/v1/expand',
                                                args=self._get_params(
                                                    params,
                                                    self.AUTH_TYPE['basic']))
        return json.loads(response['body'])

    def shorten(self, urls, domain=None, advert_type=None, group_id=None):
        params = dict()
        if domain:
            params['domain'] = domain
        if advert_type:
            params['advert_type'] = advert_type
        if group_id:
            params['group_id'] = group_id

        if type(urls) == list:
            for i, url in enumerate(urls):
                params['url[%d]' % i] = url
        elif type(urls) == str:
            params['url'] = urls

        response = self._connection.request_post('/v1/shorten',
                                                 args=self._get_params(
                                                     params,
                                                     self.AUTH_TYPE['basic']))
        return json.loads(response['body'])

    def get_urls(self, page=1, search_str=None):
        response = self._connection.request_get('/v1/urls',
                                                args=self._get_params(
                                                    dict(_page=page,
                                                         q=search_str),
                                                    self.AUTH_TYPE['hmac']))
        return json.loads(response['body'])

    def update_url(self, url_id, **kwargs):
        params = dict()

        allowed_kwargs = [
            'url', 'advert_type', 'title', 'group_id', 'fb_description',
            'fb_image'
        ]
        for k, v in kwargs.items():
            if k in allowed_kwargs:
                params[k] = v

        response = self._connection.request_put('/v1/urls/%d' % url_id,
                                                args=self._get_params(
                                                    params,
                                                    self.AUTH_TYPE['hmac']))
        return json.loads(response['body'])

    def delete_url(self, url_id):
        response = self._connection.request_delete('/v1/urls/%d' % url_id,
                                                   args=self._get_params(
                                                       dict(),
                                                       self.AUTH_TYPE['hmac']))
        return json.loads(response['body'])

    def _get_params(self, params={}, auth_type=None):
        """Populates request parameters with required parameters,
        such as _user_id, _api_key, etc.
        """
        auth_type = auth_type or self.AUTH_TYPE['basic']

        params['_user_id'] = self.USER_ID
        params['_api_key'] = self.PUBLIC_KEY

        if self.AUTH_TYPE['basic'] == auth_type:
            pass
        elif self.AUTH_TYPE['hmac'] == auth_type:
            # Get current unix timestamp (UTC time).
            params['_timestamp'] = int(time.time())
            params['_hash'] = self._do_hmac(params)
        else:
            raise RuntimeError

        return params

    def _do_hmac(self, params):
        if type(params) != dict:
            raise RuntimeError

        # Get parameter names.
        keys = params.keys()
        # Sort them using byte ordering.
        # So 'param[10]' comes before 'param[2]'.
        keys.sort()
        queryParts = []

        # Url encode query string. The encoding should be performed
        # per RFC 1738 (http://www.faqs.org/rfcs/rfc1738)
        # which implies that spaces are encoded as plus (+) signs.
        for key in keys:
            quoted_key = urllib.quote_plus(str(key))
            if params[key] is None:
                params[key] = ''

            quoted_value = urllib.quote_plus(str(params[key]))
            queryParts.append('%s=%s' % (quoted_key, quoted_value))

        return hmac.new(self.SECRET_KEY, '&'.join(queryParts),
                        hashlib.sha256).hexdigest()
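
A short usage sketch for the class above; fill in SECRET_KEY, PUBLIC_KEY and USER_ID first, and treat the URL below as a placeholder.

if __name__ == '__main__':
    api = AdflyApiExample()
    # Shorten a single URL, then list the first page of existing URLs.
    print(api.shorten('http://example.com'))
    print(api.get_urls(page=1))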