Example no. 1
def test_create():
    assert url.create(
        method='search_query',
        kind='all',
        query1='electron',
        start='0',
        number='10'
    ) == 'http://export.arxiv.org/api/query?search_query=all:electron&start=0&max_results=10'
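
The expected string pins down the URL layout that url.create is supposed to produce. A minimal sketch of a compatible builder, assuming the keyword names seen in the test (the project's real url.create may differ):

def create(method='search_query', kind='all', query1='', start='0', number='10'):
    # Hypothetical sketch: compose an arXiv API query URL of the form
    # http://export.arxiv.org/api/query?search_query=all:electron&start=0&max_results=10
    base = 'http://export.arxiv.org/api/query'
    return '%s?%s=%s:%s&start=%s&max_results=%s' % (
        base, method, kind, query1, start, number)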
Example no. 2
import urllib

import searchdef
import resultsdef
import url
#import ProcessOutputData
print(" ")
print("we imported some things and classes")

# And create the SEARCH and RESULTS objects
SEARCH = searchdef.searchclass()
RESULTS = resultsdef.resultsclass()
print(" ")
print("and made some data objects that store the infos")

# Feed the SEARCH by adding relevant parameters
SEARCH.set_author('Akhmerov')
print(" ")
print("such as the following:")
print("Author: " + SEARCH.get_author())

# Call Lucas's superduper URL create script
searchURL = url.create()  # alternatively: url.create(kind='author', query=SEARCH.get_author())
print(" ")
print("creating this superduper URL:")
print(searchURL)

# that allows us to overload the ArXiv server with our queries!
PAGE = urllib.urlopen(searchURL).read()
print(" ")
print("that we look up")
#ProcessWebPage.PROCESSTHETHINGALREADY() #Which puts all the things in the results script
print(" ")
print("and we process the hell out of!")
print(" ")