def write_f(data):
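    # Write callback for the streaming connection: chunks are accumulated in
    # `buffer` until a complete line arrives, which is then parsed as JSON and
    # appended to `fout` (crea_archivo() presumably opens a fresh output file
    # once `max_tweets` lines have been written). If the tweet carries a "text"
    # field, it is also posted to the Sentiment140 API with `sentiment_cb` as
    # the response handler.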
    global ln
    global acm
    global buffer
    content = ""

    buffer += data
    if data.endswith("\n") and buffer.strip():
        try:
            content = json.loads(buffer)
            if acm > max_tweets:
                fout.close()
                crea_archivo()

            fout.write(buffer)
            acm += 1
            if "text" in content:
                    #print "\t" + u"{0[user][name]}: {0[text]}".format(content)
                    t = "{0[text]}".format(content)
                    params = "{'data': [{'text': '%s'}]}" %t
                    curl({
                        'url'  : 'http://www.sentiment140.com/api/[email protected]',
                        'post' : params,
                        'write': sentiment_cb,
                    })
        except ValueError:
            print "ERROR AL BUFFER ",ln

        buffer = ""
    ln = ln + 1
Example n. 2
def write_f(data):
    global ln
    global acm
    global buffer
    content = ""

    buffer += data
    if data.endswith("\n") and buffer.strip():
        try:
            content = json.loads(buffer)
            if acm > max_tweets:
                fout.close()
                crea_archivo()

            fout.write(buffer)
            acm += 1
            if "text" in content:
                #print "\t" + u"{0[user][name]}: {0[text]}".format(content)
                t = "{0[text]}".format(content)
                params = "{'data': [{'text': '%s'}]}" % t
                curl({
                    'url':
                    'http://www.sentiment140.com/api/[email protected]',
                    'post': params,
                    'write': sentiment_cb,
                })
        except ValueError:
            print "ERROR AL BUFFER ", ln

        buffer = ""
    ln = ln + 1
Example n. 3
def INVIODB(tipo_mis, marca, modello, serialenum, seriale, unita, misura,
            data_ora):
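    # Serializes one measurement as JSON and POSTs it (urlencoded as "json=...")
    # to the receive.php endpoint; on a "DONE" reply the measurement is passed
    # to checkDB.CHECKDB and the success LED routine runs, otherwise the
    # failure LED routine is triggered.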
    jsona = json.dumps({
        "type": tipo_mis + ";" + marca + ";" + modello,
        "serial": serialenum,
        "medicalSerial": seriale,
        "um": unita,
        "mea": misura,
        "datetime": data_ora
    })

    print jsona

    dato = urllib.urlencode({"json": jsona})

    movidas = curl.curl("http://ws.coxnico.com/receive.php",
                        req_type="POST",
                        data=dato)
    print movidas.content
    if movidas.content == "DONE":
        import checkDB
        checkDB.CHECKDB(tipo_mis, marca, modello, serialenum, seriale, unita,
                        misura, data_ora)
        import pinLED
        pinLED.invioCompletato()
    else:
        print "NON HO POTUTO INVIARE LA MISURAZIONE"
        import pinLED
        pinLED.fail()
Example n. 4
def SubDomainScan(url):
	domain = url.split(':')[1].strip('/')
	code,head,res,errcode,_,errstr=curl.curl('-d domain=%s&b2=1 http://i.links.cn/subdomain/' % domain)
	if code == 200:
		import lxml.html
		dom = lxml.html.fromstring(res.replace('\x00','').decode('utf-8', 'ignore'))
		domainlist = dom.xpath("//div[@class='domain']/a")
		for d in domainlist:
			print d.attrib['href']
Example n. 5
def SubDomainScan(url):
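    # Strips the scheme from the URL to get the domain, queries i.links.cn's
    # subdomain lookup for it via a raw curl command string, and prints the
    # href of every link found under div.domain in the returned HTML.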
    domain = url.split(':')[1].strip('/')
    code, head, res, errcode, _, errstr = curl.curl(
        '-d domain=%s&b2=1 http://i.links.cn/subdomain/' % domain)
    if code == 200:
        import lxml.html
        dom = lxml.html.fromstring(
            res.replace('\x00', '').decode('utf-8', 'ignore'))
        domainlist = dom.xpath("//div[@class='domain']/a")
        for d in domainlist:
            print d.attrib['href']
Example n. 6
    def setup(self):
        # Load the configuration
        self.configuration = config.config()

        # Create a pycurl instance
        self.curl_instance = curl.curl(self.configuration)

        # Get all categories
        categories = self.configuration.get_categories()
        categories.insert(0, config.ALL.encode('UTF-8'))
        categories.insert(1, config.INSTALLED.encode('UTF-8'))
        categories.insert(2, config.NOT_INSTALLED.encode('UTF-8'))
        categories.insert(3, config.UPGRADEABLE.encode('UTF-8'))
        categories.insert(4, config.REMOVABLE.encode('UTF-8'))
        categories.insert(5, config.PROCESSING.encode('UTF-8'))
        categories.insert(6, '--')

        # Add categories to dropdown and sections to sectionlist
        schema = """
            methods:
            - name : dropdown
              method : Clear
        """
        for category in categories:
            schema += """
            - name : dropdown
              method : Append
              item : "%s"
            """ % (category)
        self.resources['gui'].parse_and_run(schema)
        self.resources['gui'].execute([{'name' : 'dropdown', 'method' : 'Select', 'n' : 0}])
        
        # Disable GUI
        self.disable_gui()

        # Initialize
        if self.init == False:
            # Clear toolbar
            self.create_toolbar()
            
            # Setup timer to initialize after MainLoop
            self.update_status_bar(strings.LOADING_DATABASE, '')
            self.timer = wx.Timer(self.resources['gui'].objects['frame'])
            self.timer.Start(milliseconds=100, oneShot=True)
            wx.EVT_TIMER(self.resources['gui'].objects['frame'], -1, self.initialize)
        else:
            self.initialize(None)
Example n. 7
    def setUp(self):
        # Create test directory
        self.test_dir = 'test_dir'
        try: os.mkdir(self.test_dir)
        except WindowsError: pass
        os.chdir(self.test_dir)
        
        # Copy config.ini
        shutil.copyfile(os.path.join('..', config.CONFIG_INI), config.CONFIG_INI)

        # Setup
        self.config = config.config()
        self.curlInstance = curl.curl(self.config)
        self.version_url = self.config.update[config.LOCATION]
        self.update_obj = update.update(self.config, self.curlInstance, True)

        # Load remote version.py
        version_data = self.curlInstance.get_web_data(string.join([self.version_url, update.APPSNAPLIB_DIR, 'version.py'], '/'))
        assert version_data != None
        try: exec(self.update_obj.remove_cr(version_data))
        except: assert 1 == 0

        # Create subdirs
        self.dirs = [update.APPSNAPLIB_DIR]
        for locale in LOCALES:
            self.dirs.append(os.path.join(update.LOCALE_DIR, locale, 'LC_MESSAGES'))
        for dir in self.dirs:
            try: os.makedirs(dir)
            except WindowsError: pass

        # Download release
        for file in FILES:
            self.curlInstance.download_web_data(string.join([self.version_url, update.APPSNAPLIB_DIR, file], '/'), os.path.join(update.APPSNAPLIB_DIR, file), '')

        for file in MISC:
            self.curlInstance.download_web_data(string.join([self.version_url, file], '/'), file, '')

        for locale in LOCALES:
            for file in ['appsnap.po', 'appsnap.mo']:
                self.curlInstance.download_web_data(string.join([self.version_url, update.LOCALE_DIR, locale, 'LC_MESSAGES', file], '/'), \
                    os.path.join(update.LOCALE_DIR, locale, 'LC_MESSAGES', file), '')
Example n. 8
                        'write': sentiment_cb,
                    })
        except ValueError:
            print "ERROR AL BUFFER ",ln

        buffer = ""
    ln = ln + 1


if __name__ == "__main__":
    # with parameter reading
    #user = raw_input('Ingrese nombre de usuario:')
    #pwd = getpass(''.join(['Ingrese pwd asociado a cuenta "',user,'":']))

    user = "******"
    pwd = "therealkey5"

    print_titulo("Inicia recoleccion con usuario" + user)

    # Geographic location
    #params = strip( open('locations_chile','r').read())
    params = strip( open('track','r').read())
    curl({
        'url'  : 'https://stream.twitter.com/1/statuses/filter.json',
        'post' : params,
        'write': write_f,
        'user_passwd' : ''.join([user,':',pwd])
    })

    fout.close()
Example n. 9
        except ValueError:
            print "ERROR AL BUFFER ", ln
    #content = json.loads(data.strip())

        buffer = ""

#	fout.write(content)
#print content

    ln = ln + 1


#user = '******'
user = raw_input('Ingrese nombre de usuario:')
pwd = getpass(''.join(['Ingrese pwd asociado a cuenta "', user, '":']))
params = strip(open('locations_chile', 'r').read())
#params.close()
#params = strip( open('count','r').read())
'''
curl -d @locations_chile http://stream.twitter.com/1/statuses/filter.json -ujuan_twitero:<coloca pwd> > captured_tweets_chile.json 
'''

curl({
    'url': 'http://stream.twitter.com/1/statuses/filter.json',
    'post': params,
    'write': write_f,
    'user_passwd': ''.join([user, ':', pwd])
})

fout.close()
Example n. 10
from link import Link
from curl import curl
from FileManager import FileManager
import time
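
# Appears to be a batch link checker: reads URLs from a user-supplied file,
# checks each one with the curl wrapper, and tallies successes and failures.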

array_link = [None]
array_failed = [None]
file_ok: bool = False
curl: curl = curl()
count_success: int = 0
count_all: int = 0
count_fail: int = 0

print("""
                _                  __
     /\        | |                / _|
    /  \  _   _| |_ ___  _ __ ___| |_
   / /\ \| | | | __/ _ \| '__/ _ \  _|
  / ____ \ |_| | || (_) | | |  __/ |
 /_/    \_\__,_|\__\___/|_|  \___|_|

                                      """)

print(
    "***********************Autoref by Sadek, Adamo, Ismael**********************************"
)

while not file_ok:
    try:
        file_link: str = input("Merci d'entrer le nom du fichier : ")
        fm: FileManager = FileManager(file_link, 'out.txt')
Example n. 11
 def download(self, dl_dir, cookie, *curl_param):
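     # Drains the queue of download links: each link is resolved to a local
     # filename, handed to curl, and polled via dl.update() until it finishes,
     # sleeping UPDATE_INTERVAL between polls and honouring the kill flag
     # (self._run). Returns False if the worker was already busy, got killed,
     # or apparently exceeded the daily limit; True once the queue is empty.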
     
     self._lock.acquire()
     try:
         if self._status in (KILLED, LOADING, 
             EXTRACTING, FINISHED, ERROR):
             
             return False
         else:
             self._status = LOADING
             self._msg = None
     finally:
         self._lock.release()
     
     try:
         while 1:
             link = self._dl_links.get(timeout=0)
             filename = FILENAME_RE.search(link).group(1)
             dest = os.path.join(dl_dir, filename)
             
             dl = curl.curl(link, dest, cookie=cookie, args=curl_param)
             status = dl.update()
             
             while status[curl.STATUS_RUNNING]:
                 status = dl.update()
                 self._lock.acquire()
                 try:
                     self._msg = str(status)
                     run = self._run
                 finally:
                     self._lock.release()
                 
                 if not run:
                     dl.kill()
                     self._lock.acquire()
                     try:
                         self._status = KILLED
                     finally:
                         self._lock.release()
                     return False
                 
                 time.sleep(UPDATE_INTERVAL)
             
             if self.__has_exceeded(dest):
                 self._status = ERROR
                 self._msg = "Maybe exceeded the daily limit..."
                 return False
             
             if status[curl.STATUS_RETURN] == 0:
                 self._successful_links.append(link)
             
             self._dl_count += 1
             
     except Queue.Empty:
         pass
     
     self._lock.acquire()
     try:
         self._status = DOWNLOADED
     finally:
         self._lock.release()
     
     return True
Example n. 12
            for id in cadena:
                fout.write(str(id_usuario) + "," + str(id) + "\n")
        if "error" in content:
            fout.write("error")

        acm += 1
    except ValueError:
        print "excepcion" + data


f = codecs.open(users_file, encoding='utf-8', mode='r')

for l in f:
    print "vamos con " + l
    id_usuario = l.rstrip('\n')
    url = "https://api.twitter.com/1/followers/ids.json?cursor=-1&user_id=" + str(
        id_usuario)
    print url
    try:
        curl({
            'url': url,
            #'post' : params,
            'write': write_f,
            #'user_passwd' : ''.join([user,':',pwd])
        })
    except ValueError:
        print "No tiene seguidores o algo fue mal\n"

fout.close()
f.close()
Example n. 13
import sys
from curl import curl

DOMAIN = []

"""
	for domain in DOMAIN:
		result = eval(curl('GET', 'http://www.kuaizhan.com/site/ajax-check-domain?domain=' + domain + '&site_id=5810593358', 'gr_user_id=277ac672-e0e9-476c-82f0-03071848a98c; KUAIZHAN=9gp07m14uuj43cv0a193ac1q74; email=18811464633; device_id=7Q5ly0Oo; KUAIZHAN_V2=54421%7C1487323213%7C8733324462%7Cabcbb8c621c0d6ed48b020f6707941c6a208af11; uid=226258588.392592213.1485613781477.1487323214629.1487323217951.73; gr_session_id_994a7a3e1f7a9f57=a796256a-b03c-4121-97dd-3ec8c30483a8; sid=226258588.618140784.1487323199467.1487323245261', ('User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',)))
		if result["msg"] == "":
			print domain
"""
	
if __name__ == '__main__':
	print curl('GET', 'http://weibo.com/1663418681/fans', '', ('User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',));
Example n. 14
"""
Grab the dump from the ST search API, using command line arguments as keywords.
From the results, write a decent HTML presentation.
"""

import sys, json, string
from curl import Curl as curl
from urllib2 import quote

# necessary variables
st_api = "http://apps.sphdigital.com/stapp/api/"
operation = "search/"
keywords = string.join(sys.argv[1:], ' ')
#keywords = sys.argv[1]
api_request = curl()
output = []
writeto = "data/" + keywords + ".txt"
limit = 5

# concatenate the arguments and post
results = api_request.post(st_api + operation + quote(keywords), "")

# check for multiple objects
try:
    results_obj = json.loads(results)
    for article in results_obj['hits']:
        src = article['_source']
        try:
            blurb = "<p><a href = '%s' title = '%s'>%s</a> by %s (%s)</p>" % (src['articleurl'], src['teaser'], src['headline_en'], src['byline_en'], src['publicationdate'])
            output.append(blurb)
Example n. 15
    def setUp(self):
        self.config = config.config()
        self.curlInstance = curl.curl(self.config)

        self.failed = []
Example n. 16
    'lawyer',
    'manong',
    'maifang',
    'mobile',
    'programmer',
    'renrenche',
    'shouji',
    'tencent',
    'tieba',
    'vipvip',
    'xiaonei',
    'xiaomei',
    'xingai',
    'yangsheng',
    'youku',
    'zuliao',
]

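# For each candidate name, query Kuaizhan's ajax-check-domain endpoint (with a
# saved session cookie and a browser User-Agent) and print the names whose
# check returns an empty "msg", i.e. the ones that appear to be free.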
if __name__ == '__main__':
    for domain in DOMAIN:
        result = eval(
            curl(
                'GET',
                'http://www.kuaizhan.com/site/ajax-check-domain?domain=' +
                domain + '&site_id=5810593358',
                'gr_user_id=277ac672-e0e9-476c-82f0-03071848a98c; KUAIZHAN=9gp07m14uuj43cv0a193ac1q74; email=18811464633; device_id=7Q5ly0Oo; KUAIZHAN_V2=54421%7C1487323213%7C8733324462%7Cabcbb8c621c0d6ed48b020f6707941c6a208af11; uid=226258588.392592213.1485613781477.1487323214629.1487323217951.73; gr_session_id_994a7a3e1f7a9f57=a796256a-b03c-4121-97dd-3ec8c30483a8; sid=226258588.618140784.1487323199467.1487323245261',
                ('User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
                 )))
        if result["msg"] == "":
            print domain
Example n. 17
		  cadena=content["ids"]
		  for id in cadena:
		     fout.write(str(id_usuario) + "," + str(id) + "\n")
		if "error" in content:
		  fout.write("error")

		acm += 1	
	except ValueError:
		print "excepcion" + data
	

f = codecs.open(users_file, encoding='utf-8', mode='r')

for l in f:
	print "vamos con " + l
	id_usuario = l.rstrip('\n')
	url ="https://api.twitter.com/1/followers/ids.json?cursor=-1&user_id=" + str(id_usuario)
	print url
	try:
		curl({ 
			'url'  : url,
			#'post' : params,
			'write': write_f,
			#'user_passwd' : ''.join([user,':',pwd])
		})	
	except ValueError:
		print "No tiene seguidores o algo fue mal\n"
		
fout.close()
f.close()
Example n. 18
 def setUp(self):
     self.curlInstance = curl.curl(config.config())
Example n. 19
        keywords.append(line)  # append each line that was read to the keywords list
# Get the current UTC time; UTC is used to avoid unexpected trouble caused by time zones
# (Beijing time is UTC+8, so 01:00 UTC on a given day is 09:00 Beijing time)
now = datetime.datetime.utcnow()
# Format the UTC time as a string such as 1970-01-01
date = datetime.datetime.strftime(now, '%Y-%m-%d')
# Try to create the folder; skip this step if it already exists
try:
    os.mkdir('rank')
except:
    pass
# Open the output file, named after today's date
f = open('%s.csv' % date, 'w')
for keyword in keywords:
    # Keywords may contain non-ASCII characters, so they need to be encoded
    encoded_keyword = urllib.quote_plus(keyword)
    # Download the SERP and extract the links
    url = 'http://www.baidu.com/s?wd=%s&rn=100' % encoded_keyword
    # Download the SERP; if a captcha appears, delay 10 minutes and retry
    while True:
        html = curl.curl(url, retry=True, delay=60)
        if '<img src="http://verify.baidu.com/cgi-bin/' in html:
            f.write('"%s","%s","%s"\n' % (keyword, 'verify', '-'))
            continue
        pos = getRank(html, site)
        f.write('"%s","%s","%s"\n' % (keyword, pos, site))
        print keyword, pos
        break
    delay = random.randint(1, 2)  # randomly set the delay to 1 or 2 seconds
    time.sleep(delay)  # wait before querying the next keyword's ranking
Example n. 20
def appsnap_start():
    # Print application header
    print header

    # Parse command line arguments
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'a:cdDf:Fghiln:s:tuUvVwx')
    except getopt.GetoptError:
        print help
        sys.exit(defines.ERROR_GETOPT)

    # Set defaults
    add = None
    categories = False
    download = False
    database_only = False
    categoryfilter = ''
    fixapps = False
    getversion = True
    install = False
    list = False
    names = None
    stringfilter = ''
    test = False
    upgrade = False
    updateall = False
    verbose = False
    csvdump = False
    wikidump = False
    uninstall = False

    for o, a in opts:
        if o == '-a': add = [item.strip() for item in a.split(',')]
        if o == '-c': categories = True
        if o == '-d': download = True
        if o == '-D': database_only = True
        if o == '-f': categoryfilter = a
        if o == '-F': fixapps = True
        if o == '-g': getversion = True
        if o == '-h':
            print help
            sys.exit(defines.ERROR_HELP)
        if o == '-i': install = True
        if o == '-l': list = True
        if o == '-n': names = [item.strip() for item in a.split(',')]
        if o == '-s': stringfilter = a
        if o == '-t': test = True
        if o == '-u': upgrade = True
        if o == '-U': updateall = True
        if o == '-v': verbose = True
        if o == '-V': csvdump = True
        if o == '-w': wikidump = True
        if o == '-x': uninstall = True

    # Load the configuration
    configuration = config.config()

    # Create a pycurl instance
    curl_instance = curl.curl(configuration)

    # Figure out applications selected
    if names != None and len(names) == 1 and names[0] == '*':
        if categoryfilter == '':
            names = configuration.get_sections()
        else:
            print '%s : %s' % (strings.CATEGORY, categoryfilter)
            names = configuration.get_sections_by_category(categoryfilter)

        if stringfilter != '':
            print '%s : %s\n' % (strings.FILTER, stringfilter)
            names = configuration.filter_sections_by_string(names, stringfilter)
        else:
            print

    ###
    # Perform requested action

    # Add application wizard
    if add != None and len(add) > 0:
        add_apps = adder.adder(configuration, curl_instance)
        for app in add:
            add_apps.add_application(app, uninstall)
            
    # List categories
    elif categories == True:
        configuration.display_categories()

    # List applications
    elif list == True:
        if categoryfilter == config.INSTALLED or categoryfilter == config.NOT_INSTALLED:
            names = configuration.get_sections()
            children = []
            for name in names:
                curl_instance.limit_threads(children)
                items = configuration.get_section_items(name)
                child = threading.Thread(target=process.process, args=[configuration, curl_instance, name, items])
                children.append(child)
                child.start()
        
            # Clear out threads
            curl_instance.clear_threads(children)                

        configuration.display_available_sections(categoryfilter, stringfilter)

    # Update AppSnap if requested
    elif updateall == True:
        check_only = test
        if check_only == False: print '-> %s' % strings.UPDATING_APPSNAP
        else: print '-> %s' % strings.CHECKING_FOR_UPDATES
        update_obj = update.update(configuration, curl_instance, check_only, database_only)
        returned = update_obj.update_appsnap()
        
        if returned == update.SUCCESS:
            print '-> %s' % strings.UPDATE_APPSNAP_SUCCEEDED
        elif returned == update.CHANGED:
            print '-> %s' % strings.UPDATES_AVAILABLE
        elif returned == update.UNCHANGED:
            print '-> %s' % strings.NO_CHANGES_FOUND
        elif returned == update.NEW_BUILD:
            print '-> %s' % strings.NEW_BUILD_REQUIRED
        elif returned == update.READ_ERROR:
            print '-> %s - %s' % (strings.UPDATE_APPSNAP_FAILED, strings.UNABLE_TO_READ_APPSNAP)
        elif returned == update.WRITE_ERROR:
            print '-> %s - %s' % (strings.UPDATE_APPSNAP_FAILED, strings.UNABLE_TO_WRITE_APPSNAP)
        elif returned == update.DOWNLOAD_FAILURE:
            print '-> %s - %s' % (strings.UPDATE_APPSNAP_FAILED, strings.DOWNLOAD_FAILED)