Example #1
    def normalizeURL(self, url):
        """
        Takes a URL (as returned by absolute_url(), for example) and
        replaces the hostname with the actual, fully-qualified
        hostname.
        """
        url_parts = urlsplit(url)
        hostpart  = url_parts[1]
        port      = ''

        if hostpart.find(':') != -1:
            hostname, port = hostpart.split(':')
        else:
            hostname = hostpart

        if hostname == 'localhost' or hostname == '127.0.0.1':
            hostname = getfqdn(gethostname())
        else:
            hostname = getfqdn(hostname)

        if port:
            hostpart = ':'.join((hostname, port))

        url = urlunsplit((url_parts[0], hostpart,
                          url_parts[2], url_parts[3], url_parts[4]))
        return url
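For reference, a self-contained sketch of the same normalization against Python 3's urllib.parse (the snippet above leans on older string helpers and imports not shown here); the example URL and resulting hostname are illustrative only:

import socket
from urllib.parse import urlsplit, urlunsplit

def normalize_url(url):
    # Swap a localhost-style hostname for the machine's fully-qualified name.
    parts = urlsplit(url)
    hostname = parts.hostname or ''
    if hostname in ('localhost', '127.0.0.1'):
        hostname = socket.getfqdn(socket.gethostname())
    else:
        hostname = socket.getfqdn(hostname)
    netloc = hostname if parts.port is None else '%s:%d' % (hostname, parts.port)
    return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))

# normalize_url('http://localhost:8080/status') might return
# 'http://myhost.example.com:8080/status', depending on local DNS.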
Example #2
def hostInfo():
    hostName = socket.gethostname()
    print hostName
    print socket.gethostbyname(hostName)
    print socket.gethostbyname_ex(hostName)
    print socket.getfqdn(hostName)
    print socket.getaddrinfo("www.baidu.com", 80)
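The same lookups in a Python 3 sketch that returns a dict instead of printing (the probe of www.baidu.com is kept from the example above; swap in any reachable host):

import socket

def host_info():
    name = socket.gethostname()
    return {
        'hostname': name,
        'ip': socket.gethostbyname(name),
        'name_ex': socket.gethostbyname_ex(name),        # (canonical name, aliases, IP list)
        'fqdn': socket.getfqdn(name),
        'addrinfo': socket.getaddrinfo('www.baidu.com', 80),
    }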
Example #3
 def get_hostname(self):
     """
     Returns a hostname as configured by the user
     """
     if 'hostname' in self.config:
         return self.config['hostname']
     if ('hostname_method' not in self.config
             or self.config['hostname_method'] == 'fqdn_short'):
         return socket.getfqdn().split('.')[0]
     if self.config['hostname_method'] == 'fqdn':
         return socket.getfqdn().replace('.', '_')
     if self.config['hostname_method'] == 'fqdn_rev':
         hostname = socket.getfqdn().split('.')
         hostname.reverse()
         hostname = '.'.join(hostname)
         return hostname
     if self.config['hostname_method'] == 'uname_short':
         return os.uname()[1].split('.')[0]
     if self.config['hostname_method'] == 'uname_rev':
         hostname = os.uname()[1].split('.')
         hostname.reverse()
         hostname = '.'.join(hostname)
         return hostname
     if self.config['hostname_method'].lower() == 'none':
         return None
     raise NotImplementedError(self.config['hostname_method'])
Example #4
 def is_me(self):
     logger.log(
         "And arbiter is launched with the hostname:%s from an arbiter point of view of addr :%s"
         % (self.host_name, socket.getfqdn()),
         print_it=False,
     )
     return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
Example #5
def main():
    module = AnsibleModule(
        argument_spec = dict(
            name=dict(required=True, type='str')
        )
    )

    hostname = Hostname(module)

    changed = False
    name = module.params['name']
    current_name = hostname.get_current_hostname()
    if current_name != name:
        hostname.set_current_hostname(name)
        changed = True

    permanent_name = hostname.get_permanent_hostname()
    if permanent_name != name:
        hostname.set_permanent_hostname(name)
        changed = True

    module.exit_json(changed=changed, name=name,
                     ansible_facts=dict(ansible_hostname=name.split('.')[0],
                                        ansible_nodename=name,
                                        ansible_fqdn=socket.getfqdn(),
                                        ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
Example #6
def submit(nworker, nserver, fun_submit, hostIP = 'auto', pscmd = None):
    if hostIP == 'auto':
        hostIP = 'ip'
    if hostIP == 'dns':
        hostIP = socket.getfqdn()
    elif hostIP == 'ip':
        hostIP = socket.gethostbyname(socket.getfqdn())

    if nserver == 0:
        pscmd = None

    envs = {'DMLC_NUM_WORKER' : nworker,
            'DMLC_NUM_SERVER' : nserver}

    rabit = RabitTracker(hostIP = hostIP, nslave = nworker)
    pserver = PSTracker(hostIP = hostIP, cmd = pscmd, envs = envs)

    envs.update(rabit.slave_envs())
    envs.update(pserver.slave_envs())
    rabit.start(nworker)
    fun_submit(nworker, nserver, envs)

    pserver.join()
    # start rabit tracker in another thread
    if nserver == 0:
        rabit.join()
Example #7
def send_problem_report(problem):
    """Send a problem report to OCF staff."""

    def format_frame(frame):
        _, filename, line, funcname, _, _ = frame
        return '{}:{} ({})'.format(filename, line, funcname)

    callstack = '\n        by '.join(map(format_frame, inspect.stack()))
    body = \
        """A problem was encountered and reported via ocflib:

{problem}

====
Hostname: {hostname}
Callstack:
    at {callstack}
""".format(problem=problem, hostname=socket.getfqdn(), callstack=callstack)

    send_mail(
        constants.MAIL_ROOT,
        '[ocflib] Problem report from ' + socket.getfqdn(),
        body,
        sender='ocflib <*****@*****.**>',
    )
Example #8
File: Init.py Project: AgarFu/bcfg2
 def _prompt_hostname(self):
     '''Ask for the server hostname'''
     data = raw_input("What is the server's hostname: [%s]" % socket.getfqdn())
     if data != '':
         self.shostname = data
     else:
         self.shostname = socket.getfqdn()
Example #9
def check_size(file_size, file_name, platform):
    """ compare file size with available size on sftpsite """
    ssh_key = '/home/encryptonator/.ssh/{}'.format(platform)
    df_batch = '/home/encryptonator/df'
    if 'ix5' in socket.getfqdn():
        squid = 'proxy001.ix5.ops.prod.st.ecg.so'
    elif 'esh' in socket.getfqdn():
        squid = 'proxy001.esh.ops.prod.st.ecg.so'
    with open(df_batch, 'w') as df_file:
        df_file.write('df')
    df_file.close()
    sftp_cmd = "/usr/bin/sftp -b {0} -i {1} -o ProxyCommand='/bin/nc -X connect -x {2}:3128 %h %p' {3}@88.211.136.242".format(df_batch, ssh_key, squid, platform)
    proc_sftp = sp.Popen(sftp_cmd, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT)
    proc_out = proc_sftp.communicate()[0]
    retcode = proc_sftp.returncode
    if retcode != 0:
        notify_nagios('Team {} cannot connect to Sftp Site'.format(platform))
        return 'noconnection'
    else:
        proc_out = proc_out.split('\n')[-2]  # take last but one row
        disk_avail = int(proc_out.split()[-3].replace('%', ''))

        if file_size >= disk_avail:
            mb_file_size = file_size / 1024
            mb_disk_avail = disk_avail / 1024
            notify_nagios('The file size to backup ({0} MB) exceeds the space available ({1} MB) on Sftp Site'.format(mb_file_size, mb_disk_avail))
            notify_nagios('The file {} will be removed'.format(file_name))
            return 'nospace'
Example #10
    def sendPing(self, tasks, isReboot=False):
        # Update the values (calls subclass impl)
        self.update()

        if conf.NETWORK_DISABLED:
            return

        # Create the hardware profile
        hw = ttypes.Hardware()
        hw.physicalCpus = self.physicalCpus
        hw.logicalCpus = self.logicalCpus
        hw.totalRamMb = self.totalRamMb
        hw.freeRamMb = self.freeRamMb
        hw.totalSwapMb = self.totalSwapMb
        hw.freeSwapMb = self.freeSwapMb
        hw.cpuModel = self.cpuModel
        hw.platform = self.platform
        hw.load = self.load

        # Create a ping
        ping = ttypes.Ping()
        ping.hostname = socket.getfqdn()
        ping.ipAddr = socket.gethostbyname(socket.getfqdn())
        ping.isReboot = isReboot
        ping.bootTime = self.bootTime
        ping.hw = hw
        ping.tasks = tasks

        logger.info("Sending ping: %s" % ping)
        try:
            service, transport = client.getPlowConnection()
            service.sendPing(ping)
            transport.close()
        except Exception, e:
            logger.warn("Unable to send ping to plow server, %s" % e)
Example #11
def get_hostname(config, method=None):
    """
    Returns a hostname as configured by the user
    """
    if 'hostname' in config:
        return config['hostname']

    if method is None:
        if 'hostname_method' in config:
            method = config['hostname_method']
        else:
            method = 'smart'

    # case insensitive method
    method = method.lower()

    if method == 'smart':
        hostname = get_hostname(config, 'fqdn_short')
        if hostname != 'localhost':
            return hostname
        hostname = get_hostname(config, 'hostname_short')
        return hostname

    if method == 'fqdn_short':
        return socket.getfqdn().split('.')[0]

    if method == 'fqdn':
        return socket.getfqdn().replace('.', '_')

    if method == 'fqdn_rev':
        hostname = socket.getfqdn().split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        return hostname

    if method == 'uname_short':
        return os.uname()[1].split('.')[0]

    if method == 'uname_rev':
        hostname = os.uname()[1].split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        return hostname

    if method == 'hostname':
        return socket.gethostname()

    if method == 'hostname_short':
        return socket.gethostname().split('.')[0]

    if method == 'hostname_rev':
        hostname = socket.gethostname().split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        return hostname

    if method == 'none':
        return None

    raise NotImplementedError(method)
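Hypothetical calls to the get_hostname() above (it assumes the socket and os imports of the surrounding module); the config values and the commented results are made up for illustration:

if __name__ == '__main__':
    print(get_hostname({}))                               # 'smart': fqdn_short, falling back to hostname_short
    print(get_hostname({'hostname': 'pinned-name'}))      # an explicit 'hostname' key always wins
    print(get_hostname({'hostname_method': 'fqdn_rev'}))  # e.g. 'com.example.myhost'
    print(get_hostname({}, method='uname_short'))         # first label of the os.uname() nodename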
Example #12
 def _prompt_hostname(self):
     """Ask for the server hostname."""
     data = raw_input("What is the server's hostname [%s]: " % socket.getfqdn())
     if data != '':
         self.shostname = data
     else:
         self.shostname = socket.getfqdn()
Example #13
  def __init__(self,cp):
    global has_gratia
    global Gratia
    global StorageElement
    global StorageElementRecord
    if not has_gratia:
        try:
            Gratia = __import__("Gratia")
            StorageElement = __import__("StorageElement")
            StorageElementRecord = __import__("StorageElementRecord")
            has_gratia = True
        except:
            raise
    if not has_gratia:
        print "Unable to import Gratia and Storage modules!"
        sys.exit(1)

    Gratia.Initialize()
    try:
        if Gratia.Config.get_SiteName().lower().find('generic') >= 0:
            Gratia.Config.setSiteName(socket.getfqdn())
    except:
        pass
    try:
        if Gratia.Config.get_ProbeName().lower().find('generic') >= 0:
            Gratia.Config.setProbeName('dCache-storage:%s' % socket.getfqdn())
    except:
        pass
Example #14
def _localhosts_aggressive():
    answ={}
    stack=['localhost', '127.0.0.1', socket.getfqdn(), socket.gethostname()]
    def lh_add(*hs):
        for h in hs:
            if h in answ:
                continue
            stack.append(h)
    while stack:
        h = stack.pop()
        if h in answ:
            continue
        answ[h] = True
        lh_add(socket.getfqdn(h))
        try:
            lh_add(socket.gethostbyname(h))
        except:
            pass
        try:
            fqdn, aliases, ip_addresses = socket.gethostbyname_ex(h)
            lh_add(fqdn, *aliases)
            lh_add(*ip_addresses)
        except:
            pass
        try:
            fqdn, aliases, ip_addresses = socket.gethostbyaddr(h)
            lh_add(fqdn, *aliases)
            lh_add(*ip_addresses)
        except:
            pass
    return answ
Example #15
    def find(self, all=False):
        '''
        If the all parameter is True, then all methods will be tried and a
        list of all RPRs found will be returned.
        '''
        if self.verbose: print "\tSearching for a BERT-Remote Procedure Repository"

        # First try the local machine.
        if self.verbose: print "\tTrying the local machine"
        if self.search((socket.getfqdn(), self.port)):
            if self.verbose: print "\tRPR located: %s:%s" % (socket.getfqdn(), self.port)
            if not all: return self.Registries

        # First try the supplied places (if any)
        if self.verbose and self.places: print "\tTrying supplied places",
        elif self.verbose and not self.places: print "\tNo places supplied"
        for place in self.places:
            if self.verbose: print "\tTrying %s" % (str(place))
            if self.search(place):
                if self.verbose: print "\tRPR located: %s:%s" % place
                if not all: return self.Registries

        # Next broadcast for places
        if self.verbose: print "\tBroadcasting for an RPR"
        for place in self.broadcastForPlaces():
            if self.verbose: print "\tResponse from %s:%s" % place
            if self.search(place):
                if self.verbose: print "\tRPR located: %s:%s" % place
                if not all: return self.Registries

        if self.verbose: print "\t%s RPR's found" % (len(self.Registries))
        return self.Registries
Example #16
def _start_services(primary_node, **kwargs):
    logger.info("Formatting namenode on %s...", primary_node.fqdn)
    primary_node.ssh('hdfs namenode -format')

    logger.info("Starting HDFS...")
    primary_node.ssh('/hadoop/sbin/start-dfs.sh')

    logger.info("Starting YARN...")
    primary_node.ssh('/hadoop/sbin/start-yarn.sh')

    logger.info('Starting HBase...')
    primary_node.ssh('/hbase/bin/start-hbase.sh')
    primary_node.ssh('/hbase/bin/hbase-daemon.sh start rest')

    logger.info("NameNode and HBase master are located on %s. SSH over and have fun!",
                primary_node.hostname)

    logger.info("The HDFS NameNode web UI can be reached at http://%s:%s",
                getfqdn(), get_host_port_binding(primary_node.container_id,
                                                 NAMENODE_WEB_UI_PORT))

    logger.info("The YARN ResourceManager web UI can be reached at http://%s:%s",
                getfqdn(), get_host_port_binding(primary_node.container_id,
                                                 RESOURCEMANAGER_WEB_UI_PORT))

    logger.info("The HBase master web UI can be reached at http://%s:%s",
                getfqdn(), get_host_port_binding(primary_node.container_id,
                                                 kwargs.get('hbase_master_web_ui_port')))

    logger.info("The HBase REST server can be reached at http://%s:%s",
                getfqdn(), get_host_port_binding(primary_node.container_id,
                                                 HBASE_REST_SERVER_PORT))
Example #17
 def get_user_dns():
     try:
         domain = '.'.join(getfqdn().split('.')[1:])
         user_dns = getfqdn(domain).split('.')[0]
     except:
         user_dns = ''
     return user_dns
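For example, if getfqdn() returned 'laptop.corp.example.com' (a hypothetical name), domain would be 'corp.example.com' and user_dns the first label of getfqdn('corp.example.com'), e.g. 'dc01'; any failure in the lookup falls back to an empty string.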
Example #18
def send_initial_dashboard_update(data, config):
    # Dashboard does not like Unicode, just ASCII encoding
    syncid = str(config['monitoring']['syncid'])

    try:
        if os.environ.get("PARROT_ENABLED", "FALSE") == "TRUE":
            raise ValueError()
        sync_ce = loadSiteLocalConfig().siteName
    except Exception:
        for envvar in ["GLIDEIN_Gatekeeper", "OSG_HOSTNAME", "CONDORCE_COLLECTOR_HOST"]:
            if envvar in os.environ:
                sync_ce = os.environ[envvar]
                break
        else:
            host = socket.getfqdn()
            sync_ce = config['default host']
            if host.rsplit('.')[-2:] == sync_ce.rsplit('.')[-2:]:
                sync_ce = config['default ce']
            else:
                sync_ce = 'Unknown'

    logger.info("using sync CE {}".format(sync_ce))

    parameters = {
        'ExeStart': str(config['executable']),
        'NCores': config.get('cores', 1),
        'SyncCE': sync_ce,
        'SyncGridJobId': syncid,
        'WNHostName': socket.getfqdn()
    }
    monitor(parameters)
Example #19
def getIP():
    '''
    Returns the stringified version of the local host's IP, or the fully
    qualified domain name if there is an error.

    Parameters: None

    Return: stringified version of the local host's IP
  
    Raises: Nothing
    '''
    #first we check if we have ACS_HOST defined
    try:
        return str(gethostbyname(environ['ACS_HOST']))
    except:
        pass
    
    #determine the local hosts IP address in string format
    localhost = ""
    if (environ['OSYSTEM'] == environ['CYGWIN_VER']):
        localhost = str(gethostbyname(getfqdn().split(".")[0]))
    else:
        localhost = str(gethostbyname(getfqdn()))
    #if what's above failed for some reason...
    if (localhost == None) or (localhost == ""):
        #take the HOST environment variable
        localhost = str(environ['HOST'])
    return localhost
Example #20
	def __init__(self, sa, argv):
		import socket
		self.sa = sa
		self.env = Path(self.sa.env)
		self.argv = argv
		self.defaults = {
			'svn_dir': Path('svn'),
			'git_dir': Path('git'),
			'trac_dir': Path('trac'),
			'http_base': Path('/'),
			'http_vhost': socket.getfqdn(),
			'trac_url': Path('trac'),
			'submin_url': Path('submin'),
			'svn_url': Path('svn'),
			'create_user': '******',
			'enable_features': 'svn, git, apache, nginx',
			'smtp_from': 'Submin <root@%s>' % socket.getfqdn(),
		}
		self.init_vars = {
			'conf_dir': Path('conf'),
			'hooks_dir': Path('hooks'),
		}
		self.init_vars.update({
			'authz': self.init_vars['conf_dir'] + Path('authz'),
			'htpasswd': self.init_vars['conf_dir'] + Path('htpasswd'),
		})
		self.email = None
Example #21
def process(mysettings, key, logentries, fulltext):
	if "PORTAGE_ELOG_MAILURI" in mysettings:
		myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
	else:
		myrecipient = "root@localhost"
	
	myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
	myfrom = myfrom.replace("${HOST}", socket.getfqdn())
	mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
	mysubject = mysubject.replace("${PACKAGE}", key)
	mysubject = mysubject.replace("${HOST}", socket.getfqdn())

	# look at the phases listed in our logentries to figure out what action was performed
	action = _("merged")
	for phase in logentries:
		# if we found a *rm phase assume that the package was unmerged
		if phase in ["postrm", "prerm"]:
			action = _("unmerged")
	# if we think that the package was unmerged, make sure there was no unexpected
	# phase recorded to avoid misinformation
	if action == _("unmerged"):
		for phase in logentries:
			if phase not in ["postrm", "prerm", "other"]:
				action = _("unknown")

	mysubject = mysubject.replace("${ACTION}", action)

	mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, fulltext)
	try:
		portage.mail.send_mail(mysettings, mymessage)
	except PortageException as e:
		writemsg("%s\n" % str(e), noiselevel=-1)

	return
Example #22
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True)
        ),
        supports_check_mode=True,
    )

    hostname = Hostname(module)
    name = module.params['name']

    current_hostname = hostname.get_current_hostname()
    permanent_hostname = hostname.get_permanent_hostname()

    changed = hostname.update_current_and_permanent_hostname()

    if name != current_hostname:
        name_before = current_hostname
    elif name != permanent_hostname:
        name_before = permanent_hostname

    kw = dict(changed=changed, name=name,
              ansible_facts=dict(ansible_hostname=name.split('.')[0],
                                 ansible_nodename=name,
                                 ansible_fqdn=socket.getfqdn(),
                                 ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))

    if changed:
        kw['diff'] = {'after': 'hostname = ' + name + '\n',
                      'before': 'hostname = ' + name_before + '\n'}

    module.exit_json(**kw)
Example #23
def submit(nworker, nserver, fun_submit, hostIP = 'auto', pscmd = None):
    if hostIP == 'auto':
        hostIP = 'ip'
    if hostIP == 'dns':
        hostIP = socket.getfqdn()
    elif hostIP == 'ip':
        from socket import gaierror
        try:
            hostIP = get_some_ip(socket.getfqdn())
        except gaierror:
            logging.warn('get_some_ip(socket.getfqdn()) failed... trying on hostname()')
            hostIP = get_some_ip(socket.gethostname())

    if nserver == 0:
        pscmd = None

    envs = {'DMLC_NUM_WORKER' : nworker,
            'DMLC_NUM_SERVER' : nserver}

    rabit = RabitTracker(hostIP = hostIP, nslave = nworker)
    pserver = PSTracker(hostIP = hostIP, cmd = pscmd, envs = envs)

    envs.update(rabit.slave_envs())
    envs.update(pserver.slave_envs())
    rabit.start(nworker)
    fun_submit(nworker, nserver, envs)

    pserver.join()
    # start rabit tracker in another thread
    if nserver == 0:
        rabit.join()
Example #24
def changeToWspaceDir():    
    print '\nTrying to detect and change to your workspace directory...'
    getWspaceCmd='accurev show wspaces -f x'
    try:
        #funny how the absence of the second parameter blanks out e.output in Exception
        wspaceResult=subprocess.check_output(getWspaceCmd, stderr=subprocess.STDOUT)
    except Exception as e:
        print 'Error...'
        print e.output
        if(re.search(r'Not authenticated', e.output)!=None) or (re.search(r'Expired', e.output)!=None):
            print 'Login...'
            subprocess.call('accurev login')
            wspaceResult=subprocess.check_output(getWspaceCmd)
        else:
            print 'Quitting'
            quit(1)
    
    print wspaceResult
    wspaceDir='.'
    
    treeRoot=ET.fromstring(wspaceResult)
    for child in treeRoot:
        if child.tag.lower() == 'element':
            print "child.attrib['Host'].lower()::"+child.attrib['Host'].lower()
            print 'socket.getfqdn().lower()::'+socket.getfqdn().lower()
            if(re.search(socket.getfqdn().lower(),child.attrib['Host'].lower())!=None) or (re.search(child.attrib['Host'].lower(),socket.getfqdn().lower())!=None): #changed from equals comparison after accurev update (child.attrib['Host'].lower()==socket.getfqdn().lower())
                wspaceDir=child.attrib['Storage']
                break
    
    print wspaceDir
    os.chdir(wspaceDir)
Example #25
 def is_me(self, lookup_name):
     logger.info("And arbiter is launched with the hostname:%s "
                 "from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
     if lookup_name:
         return lookup_name == self.get_name()
     else:
         return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
Example #26
def main():
    project = "haproxy"
    tablename = "http_host"
    datalogger = DataLoggerWeb("https://datalogger-api.tirol-kliniken.cc/DataLogger")
    # datestring = datalogger.get_last_business_day_datestring()
    # two days back for haproxy logs
    datestring = (datetime.date.today() - datetime.timedelta(int(2))).isoformat()
    caches = datalogger.get_caches(project, tablename, datestring)
    vhosts = [eval(key)[0].split(":")[0] for key in caches["ts"]["keys"].keys()]
    index = 1
    out_data = []
    out_data.append(("index", "vhost", "domain", "fqdn", "ip", "ip_reverse_hostname", "status_code", "x_backend_server", "duration"))
    filter_vhost = generate_filter_vhost()
    for vhost in vhosts:
        if filter_vhost(vhost) is True:
            logging.info("vhost %s filtered out", vhost)
            continue
        ip = "unknown"
        hostname = "unknown"
        duration = -1.0
        status_code = 0
        x_backend_server = None
        domain = ".".join(vhost.split(".")[1:])
        try:
            fqdn = socket.getfqdn(vhost)
            ip = socket.gethostbyname(vhost)
            hostname = socket.gethostbyaddr(ip)[0]
        except (socket.herror, socket.gaierror):
            pass
        if (ip == "unknown") or (not ip.startswith("10.")):
            logging.info("could not resolv hostname %s , probably fake", vhost)
            continue
        # could be obsolete
        elif (not ip.startswith("10.")):
            logging.info("%s is external, skipping", vhost)
            continue
        try:
            starttime = time.time()
            res = requests.request("GET", "http://%s/" % vhost, timeout=10, stream=False)
            duration = time.time()-starttime
            status_code = res.status_code
        except (requests.exceptions.ConnectionError, requests.exceptions.InvalidURL):
            logging.info("ConnectionError or InvalidURL occured %s", vhost)
        except requests.exceptions.ReadTimeout:
            logging.info("RequestTimeout occured %s", vhost)
        try:
            x_backend_server = res.headers['x-backend-server']
            if len(x_backend_server) == 8:
                # TODO not exact, hack
                ip_backend_server = decode_ip(x_backend_server)
                x_backend_server = socket.gethostbyaddr(ip_backend_server)[0] # only hostname part
            else:
                x_backend_server = socket.getfqdn(x_backend_server)
        except KeyError:
            pass
        logging.debug("%40s : %20s : %40s : %15s : %40s : %d : %s : %02f", vhost, domain, fqdn, ip, hostname, status_code, x_backend_server, duration)
        out_data.append((index, vhost, domain, fqdn, ip, hostname, status_code, x_backend_server, duration))
        index += 1
    json.dump({"last_update_ts" : str(datetime.date.today()), "data" : out_data}, open("/var/www/webapps/webmap/webmap.json", "w"))
Example #27
def getCanonicalIP(IP):
    # if IP is 'localhost' or '127.0.0.1', use the canonical local hostname.
    # (this is mostly useful when multiple clients run on the same host)
    # XXX: could use gethostbyname to get IP addy instead.
    if IP == '127.0.0.1' or IP == 'localhost':
        return socket.getfqdn()
    else:
        return socket.getfqdn(IP)
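A quick hedged demonstration of getCanonicalIP() (it assumes import socket; the resolved names depend entirely on local DNS, and 192.0.2.10 is just a documentation address):

if __name__ == '__main__':
    for addr in ('localhost', '127.0.0.1', '192.0.2.10'):
        print('%s -> %s' % (addr, getCanonicalIP(addr)))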
Example #28
def determineSeFromDomain():
    fqdn = socket.getfqdn()
    domain = "".join(fqdn.split(".")[-2:])
    if domain == "accn" or domain == "educn":
        domain = "".join(socket.getfqdn().split(".")[-3:])

    SEname = SeDomainMap.get(domain, "")
    return SEname
Example #29
def validate_hostname(node,additional_msg=""):
  logit("... validating hostname: %s" % node)
  if node != socket.getfqdn():
    logerr("""The hostname option (%(hostname)s) shows a different host. 
      This is %(thishost)s.
      %(msg)s """ % { "hostname" : node,
                      "thishost" : socket.getfqdn(),
                      "msg"      : additional_msg,})
Example #30
def freedb_command(freedb_server, freedb_port, cmd, *args):
    """given a freedb_server string, freedb_port int,
    command unicode string and argument unicode strings,
    yields a list of Unicode strings"""

    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    try:
        from urllib.parse import urlencode
    except ImportError:
        from urllib import urlencode
    from socket import getfqdn
    from audiotools import VERSION
    from sys import version_info

    PY3 = version_info[0] >= 3

    # some debug type checking
    assert(isinstance(cmd, str if PY3 else unicode))
    for arg in args:
        assert(isinstance(arg, str if PY3 else unicode))

    POST = []

    # generate query to post with arguments in specific order
    if (len(args) > 0):
        POST.append((u"cmd", u"cddb %s %s" % (cmd, " ".join(args))))
    else:
        POST.append((u"cmd", u"cddb %s" % (cmd)))

    if PY3:
        POST.append((u"hello",
                     u"user %s %s %s" % (getfqdn(),
                                         u"audiotools",
                                         VERSION)))
    else:
        POST.append((u"hello",
                     u"user %s %s %s" % (getfqdn().decode("UTF-8", "replace"),
                                         u"audiotools",
                                         VERSION.decode("ascii"))))

    POST.append((u"proto", u"6"))

    # get Request object from post
    request = urlopen(
        "http://%s:%d/~cddb/cddb.cgi" % (freedb_server, freedb_port),
        urlencode(POST).encode("UTF-8") if (version_info[0] >= 3) else
        urlencode(POST))
    try:
        # yield lines of output
        line = request.readline()
        while (len(line) > 0):
            yield line.decode("UTF-8", "replace")
            line = request.readline()
    finally:
        request.close()
Example #31
            plt.plot(train_loss, linewidth=3, label="train")
            plt.plot(valid_loss, linewidth=3, label="valid")
            plt.grid()
            plt.legend()
            plt.xlabel("epoch")
            plt.ylabel("loss")
            plt.ylim(1e-1, 1e1)
            plt.yscale("log")
            plt.savefig(outpath + 'train_val_loss.png')
            plt.close(fig)


if __name__ == '__main__':

    picsize = [120, 120]
    ip = socket.getfqdn()
    outpath = './model_outputs%s_%s_60/' % (picsize[0], ip)
    if not os.path.exists(outpath):
        os.makedirs(outpath)

    # photodb_MainST600_40m03r/'#photodb_MainST100_25m1r#photodb_MainST_NE
    pathname = './data/photodb_MainST100_25m1r/'
    csv2read = 'folderdata.csv'

    # box including everything
    SW_sf = np.array([37.707875, -122.518624])
    NE_sf = np.array([37.815086, -122.378205])
    SW = SW_sf
    NE = NE_sf
    csv2save = 'folderdata_SW%d_%dNE%d_%d.csv' % (SW[0], SW[1], NE[0], NE[1])
Example #32
def get_host_ip_address():
    return socket.gethostbyname(socket.getfqdn())
Example #33
def get_own_ip():
    try:
        return socket.gethostbyname(socket.getfqdn())
    except socket.gaierror:
        return '127.0.0.1'
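gethostbyname(getfqdn()) returns whatever address the local hostname resolves to, which on hosts whose /etc/hosts maps the name to loopback can be 127.0.x.x. A common alternative, sketched here as an assumption rather than part of the example above, is to let the kernel pick the outbound interface via a connected UDP socket:

import socket

def get_outbound_ip():
    # connect() on a UDP socket sends no packets; it only selects a route,
    # so the target merely has to look routable (192.0.2.1 is a TEST-NET address).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('192.0.2.1', 80))
        return s.getsockname()[0]
    except OSError:
        return '127.0.0.1'
    finally:
        s.close()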
Example #34

from SocketServer import BaseRequestHandler, TCPServer
import socket
from host_test import Test
from sys import stdout

SERVER_IP = str(socket.gethostbyname(socket.getfqdn()))
SERVER_PORT = 7


class TCPEchoClientTest(Test):
    def __init__(self):
        Test.__init__(self)
        self.mbed.init_serial()

    def send_server_ip_port(self, ip_address, port_no):
        print "Resetting target..."
        self.mbed.reset()
        print "Sending server IP Address to target..."
        connection_str = ip_address + ":" + str(port_no) + "\n"
        self.mbed.serial.write(connection_str)
Example #35
    def test_peer_probe_status(self):

        # get FQDN of node1 and node2
        node1 = socket.getfqdn(self.mnode)
        node2 = socket.getfqdn(self.servers[1])

        # peer probe to a new node, N2 from N1
        ret, _, err = peer_probe(node1, node2)
        self.assertEqual(ret, 0, ("Peer probe failed to %s from %s with "
                                  "error message %s" %
                                  (self.servers[1], self.mnode, err)))
        g.log.info("Peer probe from %s to %s is success", self.mnode,
                   self.servers[1])

        # check peer status in both the nodes, it should have FQDN
        # from node1
        ret, out, err = peer_status(self.mnode)
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.mnode, err)))
        g.log.info("Successfully got peer status from %s", self.mnode)

        self.assertIn(node2, out, ("FQDN of %s is not present in the "
                                   "output of peer status from %s" %
                                   (self.servers[1], self.mnode)))
        g.log.info("FQDN of %s is present in peer status of %s",
                   self.servers[1], self.mnode)

        # from node2
        ret, out, err = peer_status(self.servers[1])
        self.assertEqual(ret, 0, ("Failed to get peer status from %s with "
                                  "error message %s" % (self.servers[1], err)))
        g.log.info("Successfully got peer status from %s", self.servers[1])

        self.assertIn(node1, out, ("FQDN of %s is not present in the "
                                   "output of peer status from %s" %
                                   (self.mnode, self.servers[1])))
        g.log.info("FQDN of %s is present in peer status of %s", self.mnode,
                   self.servers[1])

        # create a distributed volume with 2 bricks
        servers_info_from_two_node_cluster = {}
        for server in self.servers[0:2]:
            servers_info_from_two_node_cluster[server] = self.all_servers_info[
                server]

        self.volume['servers'] = self.servers[0:2]
        self.volume['voltype']['dist_count'] = 2
        ret = setup_volume(self.mnode, servers_info_from_two_node_cluster,
                           self.volume)
        self.assertTrue(ret, ("Failed to create "
                              "and start volume %s" % self.volname))
        g.log.info("Successfully created and started the volume %s",
                   self.volname)

        # peer probe to a new node, N3
        ret, _, err = peer_probe(self.mnode, self.servers[2])
        self.assertEqual(ret, 0, ("Peer probe failed to %s from %s with "
                                  "error message %s" %
                                  (self.servers[2], self.mnode, err)))
        g.log.info("Peer probe from %s to %s is success", self.mnode,
                   self.servers[2])

        # Validate that the first three peers are in connected state.
        # In Jenkins, the next step (add-brick from the third node) fails
        # with "peer is not in cluster" unless the peers are connected first.
        count = 0
        while count < 30:
            ret = is_peer_connected(self.mnode, self.servers[0:3])
            if ret:
                g.log.info("Peers are in connected state")
                break
            sleep(3)
            count = count + 1
        self.assertTrue(ret, "Some peers are not in connected state")

        # add a brick from N3 to the volume
        num_bricks_to_add = 1
        server_info = {}
        server_info[self.servers[2]] = self.all_servers_info[self.servers[2]]
        brick = form_bricks_list(self.mnode, self.volname, num_bricks_to_add,
                                 self.servers[2], server_info)
        ret, _, _ = add_brick(self.mnode, self.volname, brick)
        self.assertEqual(ret, 0,
                         ("Failed to add brick to volume %s" % self.volname))
        g.log.info("add brick to the volume %s is success", self.volname)

        # get volume info, it should have correct brick information
        ret = get_volume_info(self.mnode, self.volname)
        self.assertIsNotNone(
            ret, ("Failed to get volume info from %s" % self.mnode))
        g.log.info("volume info from %s is success", self.mnode)

        brick3 = ret[self.volname]['bricks']['brick'][2]['name']
        self.assertEqual(brick3, str(brick[0]), ("Volume info has incorrect "
                                                 "information"))
        g.log.info("Volume info has correct information")
Example #36
def get_internal_ip():
    try:
        return socket.gethostbyname(socket.getfqdn())
    except Exception:
        return None
Example #37
 def server_bind(self):
     """Override server_bind to store the server name."""
     socketserver.TCPServer.server_bind(self)
     host, port = self.server_address[:2]
     self.server_name = socket.getfqdn(host)
     self.server_port = port
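For context, a minimal sketch of a server class built around this override (the class name and port are hypothetical, not taken from the project the method above comes from):

import socket
import socketserver

class NamedTCPServer(socketserver.TCPServer):
    def server_bind(self):
        # Same idea as above: bind, then record the FQDN and port actually used.
        socketserver.TCPServer.server_bind(self)
        host, port = self.server_address[:2]
        self.server_name = socket.getfqdn(host)
        self.server_port = port

# server = NamedTCPServer(('', 8000), socketserver.BaseRequestHandler)
# print(server.server_name, server.server_port)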
Example #38
class Connection(object):
    """
    Represents a STOMP client connection.
    """
    def __init__(self,
                 host_and_ports=[('localhost', 61613)],
                 user=None,
                 passcode=None,
                 prefer_localhost=True,
                 try_loopback_connect=True,
                 reconnect_sleep_initial=0.1,
                 reconnect_sleep_increase=0.5,
                 reconnect_sleep_jitter=0.1,
                 reconnect_sleep_max=60.0):
        """
        Initialize and start this connection.

        \param host_and_ports
                 a list of (host, port) tuples.

        \param prefer_localhost
                 if True and the local host is mentioned in the (host,
                 port) tuples, try to connect to this first

        \param try_loopback_connect
                 if True and the local host is found in the host
                 tuples, try connecting to it using loopback interface
                 (127.0.0.1)

        \param reconnect_sleep_initial

                 initial delay in seconds to wait before reattempting
                 to establish a connection if connection to any of the
                 hosts fails.

        \param reconnect_sleep_increase

                 factor by which the sleep delay is increased after
                 each connection attempt. For example, 0.5 means
                 to wait 50% longer than the previous attempt,
                 1.0 means wait twice as long, and 0.0 means keep
                 the delay constant.

        \param reconnect_sleep_max

                 maximum delay between connection attempts, regardless
                 of the reconnect_sleep_increase.

        \param reconnect_sleep_jitter

                 random additional time to wait (as a percentage of
                 the time determined using the previous parameters)
                 between connection attempts in order to avoid
                 stampeding. For example, a value of 0.1 means to wait
                 an extra 0%-10% (randomly determined) of the delay
                 calculated using the previous three parameters.
        """

        sorted_host_and_ports = []
        sorted_host_and_ports.extend(host_and_ports)

        # If localhost is preferred, make sure all (host, port) tuples
        # that refer to the local host come first in the list
        if prefer_localhost:

            def is_local_host(host):
                return host in Connection.__localhost_names

            sorted_host_and_ports.sort(lambda x, y: (int(is_local_host(y[0])) -
                                                     int(is_local_host(x[0]))))

        # If the user wishes to attempt connecting to local ports
        # using the loopback interface, for each (host, port) tuple
        # referring to a local host, add an entry with the host name
        # replaced by 127.0.0.1 if it doesn't exist already
        loopback_host_and_ports = []
        if try_loopback_connect:
            for host_and_port in sorted_host_and_ports:
                if is_local_host(host_and_port[0]):
                    port = host_and_port[1]
                    if (not ("127.0.0.1", port) in sorted_host_and_ports and
                            not ("localhost", port) in sorted_host_and_ports):
                        loopback_host_and_ports.append(("127.0.0.1", port))

        # Assemble the final, possibly sorted list of (host, port) tuples
        self.__host_and_ports = []
        self.__host_and_ports.extend(loopback_host_and_ports)
        self.__host_and_ports.extend(sorted_host_and_ports)

        self.__recvbuf = ''

        self.__listeners = []

        self.__reconnect_sleep_initial = reconnect_sleep_initial
        self.__reconnect_sleep_increase = reconnect_sleep_increase
        self.__reconnect_sleep_jitter = reconnect_sleep_jitter
        self.__reconnect_sleep_max = reconnect_sleep_max

        self.__connect_headers = {}
        if user is not None and passcode is not None:
            self.__connect_headers['login'] = user
            self.__connect_headers['passcode'] = passcode

        self.__socket = None
        self.__current_host_and_port = None

        self.__receiver_thread_exit_condition = threading.Condition()
        self.__receiver_thread_exited = False

    #
    # Manage the connection
    #

    def start(self):
        """
        Start the connection. This should be called after all
        listeners have been registered. If this method is not called,
        no frames will be received by the connection.
        """
        self.__running = True
        self.__attempt_connection()
        _thread.start_new_thread(self.__receiver_loop, ())

    def stop(self):
        """
        Stop the connection. This is equivalent to calling
        disconnect() but will do a clean shutdown by waiting for the
        receiver thread to exit.
        """
        self.disconnect()

        self.__receiver_thread_exit_condition.acquire()
        if not self.__receiver_thread_exited:
            self.__receiver_thread_exit_condition.wait()
        self.__receiver_thread_exit_condition.release()

    def get_host_and_port(self):
        """
        Return a (host, port) tuple indicating which STOMP host and
        port is currently connected, or None if there is currently no
        connection.
        """
        return self.__current_host_and_port

    def is_connected(self):
        try:
            return self.__socket is not None and self.__socket.getsockname(
            )[1] != 0
        except socket.error:
            return False

    #
    # Manage objects listening to incoming frames
    #

    def add_listener(self, listener):
        self.__listeners.append(listener)

    def remove_listener(self, listener):
        self.__listeners.remove(listener)

    #
    # STOMP transmissions
    #

    def subscribe(self, headers={}, **keyword_headers):
        self.__send_frame_helper(
            'SUBSCRIBE', '', self.__merge_headers([headers, keyword_headers]),
            ['destination'])

    def unsubscribe(self, headers={}, **keyword_headers):
        self.__send_frame_helper(
            'UNSUBSCRIBE', '', self.__merge_headers([headers,
                                                     keyword_headers]),
            [('destination', 'id')])

    def send(self, message='', headers={}, **keyword_headers):
        if '\x00' in message:
            content_length_headers = {'content-length': len(message)}
        else:
            content_length_headers = {}
        self.__send_frame_helper(
            'SEND', message,
            self.__merge_headers(
                [headers, keyword_headers, content_length_headers]),
            ['destination'])

    def ack(self, headers={}, **keyword_headers):
        self.__send_frame_helper(
            'ACK', '', self.__merge_headers([headers, keyword_headers]),
            ['message-id'])

    def begin(self, headers={}, **keyword_headers):
        use_headers = self.__merge_headers([headers, keyword_headers])
        if not 'transaction' in list(use_headers.keys()):
            use_headers['transaction'] = _uuid()
        self.__send_frame_helper('BEGIN', '', use_headers, ['transaction'])
        return use_headers['transaction']

    def abort(self, headers={}, **keyword_headers):
        self.__send_frame_helper(
            'ABORT', '', self.__merge_headers([headers, keyword_headers]),
            ['transaction'])

    def commit(self, headers={}, **keyword_headers):
        self.__send_frame_helper(
            'COMMIT', '', self.__merge_headers([headers, keyword_headers]),
            ['transaction'])

    def connect(self, headers={}, **keyword_headers):
        if 'wait' in keyword_headers and keyword_headers['wait']:
            while not self.is_connected():
                time.sleep(0.1)
            del keyword_headers['wait']
        self.__send_frame_helper(
            'CONNECT', '',
            self.__merge_headers(
                [self.__connect_headers, headers, keyword_headers]), [])

    def disconnect(self, headers={}, **keyword_headers):
        self.__send_frame_helper(
            'DISCONNECT', '',
            self.__merge_headers(
                [self.__connect_headers, headers, keyword_headers]), [])
        self.__running = False
        if hasattr(socket, 'SHUT_RDWR'):
            self.__socket.shutdown(socket.SHUT_RDWR)
        if self.__socket:
            self.__socket.close()
        self.__current_host_and_port = None

    # ========= PRIVATE MEMBERS =========

    # List of all host names (unqualified, fully-qualified, and IP
    # addresses) that refer to the local host (both loopback interface
    # and external interfaces).  This is used for determining
    # preferred targets.
    __localhost_names = [
        "localhost", "127.0.0.1",
        socket.gethostbyname(socket.gethostname()),
        socket.gethostname(),
        socket.getfqdn(socket.gethostname())
    ]
    #
    # Used to parse STOMP header lines in the format "key:value",
    #
    __header_line_re = re.compile('(?P<key>[^:]+)[:](?P<value>.*)')

    #
    # Used to parse the STOMP "content-length" header lines,
    #
    __content_length_re = re.compile('^content-length[:]\\s*(?P<value>[0-9]+)',
                                     re.MULTILINE)

    def __merge_headers(self, header_map_list):
        """
        Helper function for combining multiple header maps into one.

        Any underscores ('_') in header names (keys) will be replaced by dashes ('-').
        """
        headers = {}
        for header_map in header_map_list:
            for header_key in list(header_map.keys()):
                headers[header_key] = header_map[header_key]
        return headers

    def __convert_dict(self, payload):
        """
        Encode python dictionary as <map>...</map> structure.
        """

        xmlStr = "<map>\n"
        for key in payload:
            xmlStr += "<entry>\n"
            xmlStr += "<string>%s</string>" % key
            xmlStr += "<string>%s</string>" % payload[key]
            xmlStr += "</entry>\n"
        xmlStr += "</map>"

        return xmlStr

    def __send_frame_helper(self, command, payload, headers,
                            required_header_keys):
        """
        Helper function for sending a frame after verifying that a
        given set of headers are present.

        \param command the command to send

        \param payload the frame's payload

        \param headers a dictionary containing the frame's headers

        \param required_header_keys a sequence enumerating all
        required header keys. If an element in this sequence is itself
        a tuple, that tuple is taken as a list of alternatives, one of
        which must be present.

        \throws ArgumentError if one of the required header keys is
        not present in the header map.
        """
        for required_header_key in required_header_keys:
            if type(required_header_key) == tuple:
                found_alternative = False
                for alternative in required_header_key:
                    if alternative in list(headers.keys()):
                        found_alternative = True
                if not found_alternative:
                    raise KeyError(
                        "Command %s requires one of the following headers: %s"
                        % (command, str(required_header_key)))
            elif not required_header_key in list(headers.keys()):
                raise KeyError("Command %s requires header %r" %
                               (command, required_header_key))
        self.__send_frame(command, headers, payload)

    def __send_frame(self, command, headers={}, payload=''):
        """
        Send a STOMP frame.
        """
        if type(payload) == dict:
            headers["transformation"] = "jms-map-xml"
            payload = self.__convert_dict(payload)

        if self.__socket is not None:
            frame = '%s\n%s\n%s\x00' % (command,
                                        reduce(
                                            lambda accu, key: accu +
                                            ('%s:%s\n' % (key, headers[key])),
                                            list(headers.keys()), ''), payload)
            self.__socket.sendall(frame)
            log.debug("Sent frame: type=%s, headers=%r, body=%r" %
                      (command, headers, payload))
        else:
            raise NotConnectedException()

    def __receiver_loop(self):
        """
        Main loop listening for incoming data.
        """
        try:
            try:
                threading.currentThread().setName("StompReceiver")
                while self.__running:
                    log.debug('starting receiver loop')

                    if self.__socket is None:
                        break

                    try:
                        try:
                            for listener in self.__listeners:
                                if hasattr(listener, 'on_connecting'):
                                    listener.on_connecting(
                                        self.__current_host_and_port)

                            while self.__running:
                                frames = self.__read()

                                for frame in frames:
                                    (frame_type, headers,
                                     body) = self.__parse_frame(frame)
                                    log.debug(
                                        "Received frame: result=%r, headers=%r, body=%r"
                                        % (frame_type, headers, body))
                                    frame_type = frame_type.lower()
                                    if frame_type in [
                                            'connected', 'message', 'receipt',
                                            'error'
                                    ]:
                                        for listener in self.__listeners:
                                            if hasattr(listener,
                                                       'on_%s' % frame_type):
                                                eval(
                                                    'listener.on_%s(headers, body)'
                                                    % frame_type)
                                            else:
                                                log.debug(
                                                    'listener %s has no such method on_%s'
                                                    % (listener, frame_type))
                                    else:
                                        log.warning(
                                            'Unknown response frame type: "%s" (frame length was %d)'
                                            % (frame_type, len(frame)))
                        finally:
                            try:
                                self.__socket.close()
                            except:
                                pass  # ignore errors when attempting to close socket
                            self.__socket = None
                            self.__current_host_and_port = None
                    except ConnectionClosedException:
                        if self.__running:
                            log.error("Lost connection")
                            # Notify listeners
                            for listener in self.__listeners:
                                if hasattr(listener, 'on_disconnected'):
                                    listener.on_disconnected()
                            # Clear out any half-received messages after losing connection
                            self.__recvbuf = ''
                            continue
                        else:
                            break
            except:
                log.exception(
                    "An unhandled exception was encountered in the stomp receiver loop"
                )

        finally:
            self.__receiver_thread_exit_condition.acquire()
            self.__receiver_thread_exited = True
            self.__receiver_thread_exit_condition.notifyAll()
            self.__receiver_thread_exit_condition.release()

    def __read(self):
        """
        Read the next frame(s) from the socket.
        """
        fastbuf = StringIO()
        while self.__running:
            try:
                c = self.__socket.recv(1024)
            except:
                c = ''
            if len(c) == 0:
                raise ConnectionClosedException
            fastbuf.write(c)
            if '\x00' in c:
                break
        self.__recvbuf += fastbuf.getvalue()
        fastbuf.close()
        result = []

        if len(self.__recvbuf) > 0 and self.__running:
            while True:
                pos = self.__recvbuf.find('\x00')
                if pos >= 0:
                    frame = self.__recvbuf[0:pos]
                    preamble_end = frame.find('\n\n')
                    if preamble_end >= 0:
                        content_length_match = Connection.__content_length_re.search(
                            frame[0:preamble_end])
                        if content_length_match:
                            content_length = int(
                                content_length_match.group('value'))
                            content_offset = preamble_end + 2
                            frame_size = content_offset + content_length
                            if frame_size > len(frame):
                                # Frame contains NUL bytes, need to
                                # read more
                                if frame_size < len(self.__recvbuf):
                                    pos = frame_size
                                    frame = self.__recvbuf[0:pos]
                                else:
                                    # Haven't read enough data yet,
                                    # exit loop and wait for more to
                                    # arrive
                                    break
                    result.append(frame)
                    self.__recvbuf = self.__recvbuf[pos + 1:]
                else:
                    break
        return result

    def __transform(self, body, transType):
        """
        Perform body transformation. Currently, the only supported transformation is
        'jms-map-xml', which converts a map into python dictionary. This can be extended
        to support other transformation types.

        The body has the following format:
        <map>
          <entry>
            <string>name</string>
            <string>Dejan</string>
          </entry>
          <entry>
            <string>city</string>
            <string>Belgrade</string>
          </entry>
        </map>

        (see http://docs.codehaus.org/display/STOMP/Stomp+v1.1+Ideas)
        """

        if transType != 'jms-map-xml':
            return body

        try:
            entries = {}
            doc = xml.dom.minidom.parseString(body)
            rootElem = doc.documentElement
            for entryElem in rootElem.getElementsByTagName("entry"):
                pair = []
                for node in entryElem.childNodes:
                    if not isinstance(node, xml.dom.minidom.Element): continue
                    pair.append(node.firstChild.nodeValue)
                assert len(pair) == 2
                entries[pair[0]] = pair[1]
            return entries
        except Exception as ex:
            # unable to parse message. return original
            return body

    def __parse_frame(self, frame):
        """
        Parse a STOMP frame into a (frame_type, headers, body) tuple,
        where frame_type is the frame type as a string (e.g. MESSAGE),
        headers is a map containing all header key/value pairs, and
        body is a string containing the frame's payload.
        """
        preamble_end = frame.find('\n\n')
        preamble = frame[0:preamble_end]
        preamble_lines = preamble.split('\n')
        body = frame[preamble_end + 2:]

        # Skip any leading newlines
        first_line = 0
        while first_line < len(preamble_lines) and len(
                preamble_lines[first_line]) == 0:
            first_line += 1

        # Extract frame type
        frame_type = preamble_lines[first_line]

        # Put headers into a key/value map
        headers = {}
        for header_line in preamble_lines[first_line + 1:]:
            header_match = Connection.__header_line_re.match(header_line)
            if header_match:
                headers[header_match.group('key')] = header_match.group(
                    'value')

        if 'transformation' in headers:
            body = self.__transform(body, headers['transformation'])

        return (frame_type, headers, body)

    def __attempt_connection(self):
        """
        Try connecting to the (host, port) tuples specified at construction time.
        """

        sleep_exp = 1
        while self.__running and self.__socket is None:
            for host_and_port in self.__host_and_ports:
                try:
                    log.debug("Attempting connection to host %s, port %s" %
                              host_and_port)
                    self.__socket = socket.socket(socket.AF_INET,
                                                  socket.SOCK_STREAM)
                    self.__socket.settimeout(None)
                    self.__socket.connect(host_and_port)
                    self.__current_host_and_port = host_and_port
                    log.info("Established connection to host %s, port %s" %
                             host_and_port)
                    break
                except socket.error:
                    self.__socket = None
                    if type(sys.exc_info()[1]) == tuple:
                        exc = sys.exc_info()[1][1]
                    else:
                        exc = sys.exc_info()[1]
                    log.warning("Could not connect to host %s, port %s: %s" %
                                (host_and_port[0], host_and_port[1], exc))

            if self.__socket is None:
                sleep_duration = (min(self.__reconnect_sleep_max, (
                    (self.__reconnect_sleep_initial /
                     (1.0 + self.__reconnect_sleep_increase)) *
                    math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)
                )) * (1.0 + random.random() * self.__reconnect_sleep_jitter))
                sleep_end = time.time() + sleep_duration
                log.debug(
                    "Sleeping for %.1f seconds before attempting reconnect" %
                    sleep_duration)
                while self.__running and time.time() < sleep_end:
                    time.sleep(0.2)

                if sleep_duration < self.__reconnect_sleep_max:
                    sleep_exp += 1
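
The jms-map-xml handling in __transform above boils down to turning each <entry> pair of <string> elements into one dictionary item. A minimal standalone sketch of that conversion, using the sample body from the docstring:

import xml.dom.minidom

# Sample jms-map-xml body, taken from the __transform docstring above
body = """<map>
  <entry>
    <string>name</string>
    <string>Dejan</string>
  </entry>
  <entry>
    <string>city</string>
    <string>Belgrade</string>
  </entry>
</map>"""

entries = {}
doc = xml.dom.minidom.parseString(body)
for entry in doc.documentElement.getElementsByTagName("entry"):
    # Each <entry> carries exactly two <string> children: key, then value
    pair = [node.firstChild.nodeValue
            for node in entry.childNodes
            if isinstance(node, xml.dom.minidom.Element)]
    entries[pair[0]] = pair[1]

print(entries)  # {'name': 'Dejan', 'city': 'Belgrade'}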
Example #39
0
def get_hostname():
    """
    Return the hostname for the current system
    """
    return socket.getfqdn()
Example #40
0
 def __init__(self, queue, hostname=None):
     super(Edge, self).__init__()
     self.queue = queue
     self.hostname = hostname or socket.getfqdn()
Example #41
0
#
# Requires
# - just python 2.6
#
########################################################
#
# Test
# - echo -e "GET /\n\n" | nc localhost 8080
########################################################

import logging
import sys
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import socket

MY_IP=socket.gethostbyname(socket.getfqdn())

class ServerHandler(BaseHTTPRequestHandler):

    def do_GET(self):
        global MY_IP
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write("Hello %s, I am %s.\n" % (self.client_address[0], MY_IP))


class Server():

    def __init__(self, args):
        self.args = args
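
The Server class in this excerpt is cut off right after __init__, so it never actually starts anything. As a rough sketch only, a Python 3 equivalent of the same handler wired to an HTTPServer might look like the following; port 8080 is assumed from the test comment above, and http.server replaces the Python 2 BaseHTTPServer module:

import socket
from http.server import BaseHTTPRequestHandler, HTTPServer

MY_IP = socket.gethostbyname(socket.getfqdn())

class ServerHandler(BaseHTTPRequestHandler):

    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # In Python 3 the response body must be bytes, not str
        self.wfile.write(("Hello %s, I am %s.\n" %
                          (self.client_address[0], MY_IP)).encode())

if __name__ == '__main__':
    HTTPServer(('', 8080), ServerHandler).serve_forever()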
Example #42
0
    async def do_hooks(self, build_id):
        hooks = []
        with Session() as session:
            build = session.query(Build).filter(Build.id == build_id).first()
            if not build:
                logger.error("hooks: build {} not found".format(build_id))
                return

            maintainer = build.maintainer

            cfg_host = Configuration().hostname
            hostname = cfg_host if cfg_host else socket.getfqdn()

            class ResultObject:
                pass

            repository = ResultObject()
            if build.sourcerepository:
                repository.url = build.sourcerepository.url
                repository.name = build.sourcerepository.name

            buildres = ResultObject()
            buildres.id = build.id
            buildres.status = build.buildstate
            buildres.version = build.version
            buildres.url = "http://{}/build/{}".format(hostname, build.id)
            buildres.raw_log_url = "http://{}/buildout/{}/build.log".format(
                hostname, build.id)
            buildres.commit = build.git_ref
            buildres.branch = build.ci_branch

            platform = ResultObject()
            if build.projectversion.basemirror:
                platform.distrelease = build.projectversion.basemirror.project.name
                platform.version = build.projectversion.basemirror.name
                platform.architecture = build.architecture

            project = ResultObject()
            if build.projectversion:
                project.name = build.projectversion.project.name
                project.version = build.projectversion.name

            args = {
                "repository": repository,
                "build": buildres,
                "platform": platform,
                "maintainer": maintainer,
                "project": project,
            }

            if not build.sourcerepository or not build.projectversion:
                logger.warning("hook: no source repo and no projectversion")
                return

            buildconfig = session.query(SouRepProVer).filter(
                SouRepProVer.sourcerepository_id == build.sourcerepository_id,
                SouRepProVer.projectversion_id ==
                build.projectversion_id).first()
            if not buildconfig:
                logger.warning("hook: source repo not in projectversion")
                return
            postbuildhooks = session.query(Hook).join(PostBuildHook).filter(
                PostBuildHook.sourcerepositoryprojectversion_id ==
                buildconfig.id)
            for hook in postbuildhooks:
                method = None
                url = None
                skip_ssl = None
                body = None

                if not hook.enabled:
                    logger.warning("hook: not enabled")
                    continue

                if build.buildtype == "build" and not hook.notify_overall:
                    logger.info("hook: top build not enabled")
                    continue

                if build.buildtype == "source" and not hook.notify_src:
                    logger.info("hook: src build not enabled")
                    continue

                if build.buildtype == "deb" and not hook.notify_deb:
                    logger.info("hook: deb build not enabled")
                    continue

                try:
                    url = Template(hook.url).render(**args)
                except Exception as exc:
                    logger.error("hook: error rendering URL template", url,
                                 exc)
                    continue

                if hook.body:
                    try:
                        body = Template(hook.body).render(**args)
                    except Exception as exc:
                        logger.error("hook: error rendering BODY template",
                                     url, exc)
                        continue

                method = hook.method
                skip_ssl = hook.skip_ssl

                logger.info("adding hook: %s" % url)
                hooks.append((method, url, skip_ssl, body))

        for hook in hooks:
            try:
                await trigger_hook(hook[0],
                                   hook[1],
                                   skip_ssl=hook[2],
                                   body=hook[3])
            except Exception as exc:
                logger.error("hook: error calling {} '{}': {}".format(
                    hook[0], hook[1], exc))
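
The hooks above are plain template strings rendered against the args dictionary built from the build. An illustrative sketch of that rendering step, assuming Template here is jinja2.Template (the URL and values below are made up):

from jinja2 import Template

class ResultObject:
    pass

buildres = ResultObject()
buildres.id = 42
buildres.status = "successful"

args = {"build": buildres}

url = Template("http://ci.example.com/notify?build={{ build.id }}"
               "&state={{ build.status }}").render(**args)
print(url)  # http://ci.example.com/notify?build=42&state=successful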
Example #43
0
def add_redirection():

    parser = argparse.ArgumentParser(prog='add_redirection.py',
                                     description='A tool for add redirections')

    parser.add_argument('--mailbox',
                        help='Mailbox to add redirection',
                        required=True)

    parser.add_argument('--redirection',
                        help='Mailbox to redirect the mail',
                        required=True)

    args = parser.parse_args()

    json_return = {
        'error': 0,
        'status': 0,
        'progress': 0,
        'no_progress': 0,
        'message': ''
    }

    check_lock('virtual_domains')

    try:

        user, domain = args.mailbox.split("@")

        user_redirection, domain_redirection = args.redirection.split("@")

    except ValueError:
        try:

            user_redirection, domain_redirection, tld = args.redirection.split(
                "@")
            #Check if domain is the host domain
            hostname = 'autoreply.' + socket.getfqdn()

            if tld != hostname:
                json_return['error'] = 1
                json_return['status'] = 1
                json_return['progress'] = 100
                json_return[
                    'message'] = 'Error: not a valid hostname for the service'

                print(json.dumps(json_return))

                unlock_file('virtual_domains')

                exit(1)

        except ValueError:

            json_return['error'] = 1
            json_return['status'] = 1
            json_return['progress'] = 100
            json_return['message'] = 'Error: domain or user is not valid'

            print(json.dumps(json_return))

            unlock_file('virtual_domains')

            exit(1)

    except:
        json_return['error'] = 1
        json_return['status'] = 1
        json_return['progress'] = 100
        json_return['message'] = 'Error: domain or user is not valid'

        print(json.dumps(json_return))

        unlock_file('virtual_domains')

        exit(1)

    #mailbox_user=args.mailbox.replace("@", "_")

    domain_check = re.compile(
        r'^(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|([a-zA-Z0-9][a-zA-Z0-9-_]{1,61}[a-zA-Z0-9]))\.([a-zA-Z]{2,6}|[a-zA-Z0-9-]{2,30}\.[a-zA-Z]{2,3})$'
    )

    user_check = re.compile(r'^[a-zA-Z0-9-_|\.]+$')

    # Escape the mailbox so dots in the address are matched literally
    redirection_check = re.compile('^' + re.escape(args.mailbox) + ' .*$')

    if not domain_check.match(domain) or not user_check.match(
            user) or not domain_check.match(
                domain_redirection) or not user_check.match(user_redirection):
        json_return['error'] = 1
        json_return['status'] = 1
        json_return['progress'] = 100
        json_return['message'] = 'Error: domain or user is not valid'

        print(json.dumps(json_return))

        unlock_file('virtual_domains')

        exit(1)

    json_return['progress'] = 25
    json_return['message'] = 'Mailbox and redirection are valid'

    print(json.dumps(json_return))
    time.sleep(1)
    """
    try:
        
        user_pwd=pwd.getpwnam(mailbox_user) 
        
    except KeyError:
        json_return['error']=1
        json_return['status']=1
        json_return['progress']=100
        json_return['message']='Error: user no exists'

        print(json.dumps(json_return))

        sys.exit(1)
    """

    # Add user to virtual_mailbox

    #mailbox=args.user+'@'+args.domain
    #mailbox_user=args.user+'_'+args.domain

    # You can add many redirections

    # Check that the domain exists

    domain_line = domain + ' ' + domain

    redirection_line = args.mailbox + ' ' + args.redirection

    yes_domain = 0

    with open('/etc/postfix/virtual_domains') as f:
        for l in f:
            l = l.strip()
            if l == domain_line:
                yes_domain = 1
                break

    no_same_redirection = 1

    arr_line = [redirection_line]

    with open('/etc/postfix/virtual_mailbox') as f:
        for l in f:
            l = l.strip()
            if redirection_check.match(l):
                ls = l.split(' ')
                redirections = ls[1].split(',')
                #print(redirections)
                if args.redirection in redirections:
                    no_same_redirection = 0
                else:
                    redirections.append(args.redirection)
                    redirection_line = args.mailbox + ' ' + ','.join(
                        redirections)
                    arr_line.append(redirection_line)
                    del arr_line[0]
            else:
                arr_line.append(l)

    if yes_domain == 1 and no_same_redirection == 1:

        #Add redirection

        with open('/etc/postfix/virtual_mailbox', 'w') as f:
            if f.write("\n".join(arr_line) + "\n"):
                json_return['progress'] = 50
                json_return['message'] = 'Redirection added'

                print(json.dumps(json_return))
                time.sleep(1)
            else:
                json_return['error'] = 1
                json_return['status'] = 1
                json_return['progress'] = 100
                json_return[
                    'message'] = 'Error: cannot add the new redirection to file'

                print(json.dumps(json_return))

                unlock_file('virtual_domains')

                exit(1)

        if call("postmap hash:/etc/postfix/virtual_mailbox",
                shell=True,
                stdout=DEVNULL) > 0:

            json_return['error'] = 1
            json_return['status'] = 1
            json_return['progress'] = 100
            json_return['message'] = 'Error: cannot refresh the domain mapper'

            print(json.dumps(json_return))

            unlock_file('virtual_domains')

            exit(1)

        json_return['progress'] = 100
        json_return['status'] = 1
        json_return['message'] = 'Redirection added successfully'

        print(json.dumps(json_return))
    else:
        json_return['error'] = 1
        json_return['status'] = 1
        json_return['progress'] = 100
        json_return[
            'message'] = 'Error: domain doesn\'t exist or the same redirection already exists'

        print(json.dumps(json_return))

        unlock_file('virtual_domains')

        exit(1)

    unlock_file('virtual_domains')
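
For clarity, the rewrite loop above takes a "mailbox redirection1,redirection2" line from /etc/postfix/virtual_mailbox and appends the new target unless it is already present. A small standalone sketch of that merge (the addresses are made up):

def add_redirection_line(line, mailbox, redirection):
    """Append redirection to the line for mailbox, skipping duplicates."""
    key, targets = line.split(' ', 1)
    redirections = targets.split(',')
    if key == mailbox and redirection not in redirections:
        redirections.append(redirection)
    return key + ' ' + ','.join(redirections)

line = 'info@example.com alice@example.com'
print(add_redirection_line(line, 'info@example.com', 'bob@example.com'))
# info@example.com alice@example.com,bob@example.com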
Example #44
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

' exercise name:3.socket-threading-server '

__author__ = 'bingshuizhilian'



# Supports only a single client; sending and receiving messages do not block each other

import socket, threading, datetime

pcname = socket.getfqdn(socket.gethostname())
ip_addr = socket.gethostbyname(pcname)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((ip_addr, 9999))
s.listen(2)
print('Waiting for a client to connect...')
sock, addr = s.accept()
if sock is not None:
    print('Client %s:%d connected!!!' % (addr[0], addr[1]))

keepFlag = True

def rec(sk):
    global keepFlag
    while keepFlag:
        t = sk.recv(1024).decode('utf-8')
        if 'exit' == t:
            keepFlag = False
Example #45
0
    def serve(self):
        class Root(object):

            # collapse everything to '/'
            def _cp_dispatch(self, vpath):
                cherrypy.request.path = ''
                return self

            @cherrypy.expose
            def index(self):
                return '''<!DOCTYPE html>
<html>
    <head><title>Ceph Exporter</title></head>
    <body>
        <h1>Ceph Exporter</h1>
        <p><a href='/metrics'>Metrics</a></p>
    </body>
</html>'''

            @cherrypy.expose
            def metrics(self):
                instance = global_instance()
                # Lock the function execution
                try:
                    instance.collect_lock.acquire()
                    return self._metrics(instance)
                finally:
                    instance.collect_lock.release()

            @staticmethod
            def _metrics(instance):
                # Return cached data if available and collected before the
                # cache times out
                if instance.collect_cache and time.time(
                ) - instance.collect_time < instance.collect_timeout:
                    cherrypy.response.headers['Content-Type'] = 'text/plain'
                    return instance.collect_cache

                if instance.have_mon_connection():
                    instance.collect_cache = None
                    instance.collect_time = time.time()
                    instance.collect_cache = instance.collect()
                    cherrypy.response.headers['Content-Type'] = 'text/plain'
                    return instance.collect_cache
                else:
                    raise cherrypy.HTTPError(503, 'No MON connection')

        # Make the cache timeout for collecting configurable
        self.collect_timeout = self.get_localized_module_option(
            'scrape_interval', 5.0)

        server_addr = self.get_localized_module_option('server_addr',
                                                       DEFAULT_ADDR)
        server_port = self.get_localized_module_option('server_port',
                                                       DEFAULT_PORT)
        self.log.info("server_addr: %s server_port: %s" %
                      (server_addr, server_port))

        # Publish the URI that others may use to access the service we're
        # about to start serving
        self.set_uri('http://{0}:{1}/'.format(
            socket.getfqdn() if server_addr == '::' else server_addr,
            server_port))

        cherrypy.config.update({
            'server.socket_host': server_addr,
            'server.socket_port': int(server_port),
            'engine.autoreload.on': False
        })
        cherrypy.tree.mount(Root(), "/")
        self.log.info('Starting engine...')
        cherrypy.engine.start()
        self.log.info('Engine started.')
        # wait for the shutdown event
        self.shutdown_event.wait()
        self.shutdown_event.clear()
        cherrypy.engine.stop()
        self.log.info('Engine stopped.')
        self.shutdown_rbd_stats()
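
The _metrics helper above serves a cached scrape for up to collect_timeout seconds before collecting again. The same time-based cache pattern in isolation (the class and names here are illustrative, not part of the Ceph module API):

import time

class CachedCollector:

    def __init__(self, collect, timeout=5.0):
        self._collect = collect        # the expensive collection callable
        self._timeout = timeout        # cache lifetime in seconds
        self._cache = None
        self._collected_at = 0.0

    def get(self):
        # Serve the cached value while it is still fresh
        if self._cache is not None and \
                time.time() - self._collected_at < self._timeout:
            return self._cache
        self._collected_at = time.time()
        self._cache = self._collect()
        return self._cache

collector = CachedCollector(lambda: "expensive metrics payload", timeout=5.0)
print(collector.get())  # collects
print(collector.get())  # served from cache for the next 5 seconds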
Example #46
0
    def run(self, config, options, args, help=None):
        if options.output:
            output = StringIO()
            global curses
            if curses and config.progress_bar:
                try:
                    curses.setupterm()
                except:
                    curses = None
        else:
            output = sys.stdout

        if not self.checks:
            self.load_checks_from_options(options.checks)

        self.load_bugs(options.bugfile)
        self.load_false_positives(options.falsepositivesfile)

        config.devhelp_dirname = options.devhelp_dirname
        config.partial_build = False

        module_set = jhbuild.moduleset.load(config)
        if options.list_all_modules:
            self.module_list = module_set.modules.values()
        else:
            self.module_list = module_set.get_module_list(
                args or config.modules, config.skip)

        results = {}
        try:
            cachedir = os.path.join(os.environ['XDG_CACHE_HOME'], 'jhbuild')
        except KeyError:
            cachedir = os.path.join(os.environ['HOME'], '.cache', 'jhbuild')
        if options.cache:
            try:
                results = cPickle.load(
                    file(os.path.join(cachedir, options.cache)))
            except:
                pass

        self.repeat_row_header = 0
        if len(self.checks) > 4:
            self.repeat_row_header = 1

        for module_num, mod in enumerate(self.module_list):
            if mod.type in ('meta', 'tarball'):
                continue
            if not mod.branch or not mod.branch.repository.__class__.__name__ in (
                    'SubversionRepository', 'GitRepository'):
                if not mod.moduleset_name.startswith('gnome-external-deps'):
                    continue

            if not os.path.exists(mod.branch.srcdir):
                continue

            tree_id = mod.branch.tree_id()
            valid_cache = (tree_id and results.get(mod.name, {}).get('tree-id')
                           == tree_id)

            if not mod.name in results:
                results[mod.name] = {'results': {}}
            results[mod.name]['tree-id'] = tree_id
            r = results[mod.name]['results']
            for check in self.checks:
                if valid_cache and check.__name__ in r:
                    continue
                try:
                    c = check(config, mod)
                except ExcludedModuleException:
                    continue

                if output != sys.stdout and config.progress_bar:
                    progress_percent = 1.0 * (module_num - 1) / len(
                        self.module_list)
                    msg = '%s: %s' % (mod.name, check.__name__)
                    self.display_status_line(progress_percent, module_num, msg)

                try:
                    c.run()
                except CouldNotPerformCheckException:
                    continue
                except ExcludedModuleException:
                    continue

                try:
                    c.fix_false_positive(
                        self.false_positives.get((mod.name, check.__name__)))
                except ExcludedModuleException:
                    continue

                r[check.__name__] = [c.status, c.complexity, c.result_comment]

        if not os.path.exists(cachedir):
            os.makedirs(cachedir)
        if options.cache:
            cPickle.dump(results,
                         file(os.path.join(cachedir, options.cache), 'w'))

        print >> output, HTML_AT_TOP % {'title': self.title}
        if self.page_intro:
            print >> output, self.page_intro
        print >> output, '<table>'
        print >> output, '<thead>'
        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'
        if [x for x in self.checks if x.header_note]:
            print >> output, '<tr><td></td>'
            for check in self.checks:
                print >> output, '<td>%s</td>' % (check.header_note or '')
            print >> output, '</tr>'
        print >> output, '</thead>'
        print >> output, '<tbody>'

        processed_modules = {'gnome-common': True}

        suites = []
        for module_key, module in module_set.modules.items():
            if not isinstance(module_set.get_module(module_key), MetaModule):
                continue
            if module_key.endswith('upcoming-deprecations'):
                # mark deprecated modules as processed, so they don't show in "Others"
                try:
                    metamodule = module_set.get_module(module_key)
                except KeyError:
                    continue
                for module_name in metamodule.dependencies:
                    processed_modules[module_name] = True
            else:
                suites.append([module_key, module_key.replace('meta-', '')])

        not_other_module_names = []
        for suite_key, suite_label in suites:
            metamodule = module_set.get_module(suite_key)
            module_names = [x for x in metamodule.dependencies if x in results]
            if not module_names:
                continue
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                1 + len(self.checks) + self.repeat_row_header, suite_label)
            for module_name in module_names:
                if module_name in not_other_module_names:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
                processed_modules[module_name] = True
            not_other_module_names.extend(module_names)

        external_deps = [x for x in results.keys() if \
                         x in [y.name for y in self.module_list] and \
                         not x in processed_modules and \
                         module_set.get_module(x).moduleset_name.startswith('gnome-external-deps')]
        if external_deps:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                1 + len(self.checks) + self.repeat_row_header,
                'External Dependencies')
            for module_name in sorted(external_deps):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                try:
                    version = module_set.get_module(module_name).branch.version
                except:
                    version = None
                print >> output, self.get_mod_line(module_name,
                                                   r,
                                                   version_number=version)

        other_module_names = [x for x in results.keys() if \
                              not x in processed_modules and not x in external_deps]
        if other_module_names:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                1 + len(self.checks) + self.repeat_row_header, 'Others')
            for module_name in sorted(other_module_names):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
        print >> output, '</tbody>'
        print >> output, '<tfoot>'

        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'

        print >> output, self.get_stat_line(results, not_other_module_names)
        print >> output, '</tfoot>'
        print >> output, '</table>'

        if (options.bugfile and options.bugfile.startswith('http://')) or \
                (options.falsepositivesfile and options.falsepositivesfile.startswith('http://')):
            print >> output, '<div id="data">'
            print >> output, '<p>The following data sources are used:</p>'
            print >> output, '<ul>'
            if options.bugfile.startswith('http://'):
                print >> output, '  <li><a href="%s">Bugs</a></li>' % options.bugfile
            if options.falsepositivesfile.startswith('http://'):
                print >> output, '  <li><a href="%s">False positives</a></li>' % options.falsepositivesfile
            print >> output, '</ul>'
            print >> output, '</div>'

        print >> output, '<div id="footer">'
        print >> output, 'Generated:', time.strftime('%Y-%m-%d %H:%M:%S %z')
        print >> output, 'on ', socket.getfqdn()
        print >> output, '</div>'

        print >> output, '</body>'
        print >> output, '</html>'

        if output != sys.stdout:
            file(options.output, 'w').write(output.getvalue())

        if output != sys.stdout and config.progress_bar:
            sys.stdout.write('\n')
            sys.stdout.flush()
Example #47
0
def run(args, user, log_path):
    """ Run the WMF auto reimage according to command line arguments

        Arguments:
        args     -- parsed command line arguments
        user     -- the user that launched the script, for auditing purposes
        log_path -- the path of the logfile
    """
    # Get additional informations
    ipmi_password = get_ipmi_password()
    custom_mgmts = get_custom_mgmts(args.hosts)
    icinga_host = resolve_dns(ICINGA_DOMAIN, 'CNAME')
    puppetmaster_host = resolve_dns(PUPPET_DOMAIN, 'CNAME')
    deployment_host = resolve_dns(DEPLOYMENT_DOMAIN, 'CNAME')
    phab_client = get_phabricator_client()
    hosts = args.hosts
    hosts_status = None

    # Validate hosts
    validate_hosts(puppetmaster_host, args.hosts, args.no_verify)

    # Update the Phabricator task
    if args.phab_task_id is not None:
        phabricator_task_update(
            phab_client, args.phab_task_id,
            PHAB_COMMENT_PRE.format(user=user,
                                    hostname=socket.getfqdn(),
                                    hosts=hosts,
                                    log=log_path))

    # Set downtime on Icinga
    hosts = icinga_downtime(icinga_host, hosts, user, args.phab_task_id)

    # Depool via conftool
    if args.conftool:
        hosts_status = conftool_depool_hosts(puppetmaster_host, hosts)
        hosts = conftool_ensure_depooled(puppetmaster_host, hosts)
        # Run Puppet on the deployment host to update DSH groups
        if len(hosts) > 0:
            run_puppet([deployment_host])

    # Start the reimage
    reimage_time = datetime.now()
    hosts = reimage_hosts(puppetmaster_host,
                          hosts,
                          custom_mgmts=custom_mgmts,
                          ipmi_password=ipmi_password)
    hosts = check_reimage(puppetmaster_host, hosts)
    hosts = check_uptime(hosts,
                         maximum=int(
                             (datetime.now() - reimage_time).total_seconds()))

    # Wait for Puppet
    hosts = wait_puppet_run(hosts, start=reimage_time)

    if not args.no_reboot:
        # Issue a reboot and wait for it and also for Puppet to complete
        reboot_time = datetime.now()
        hosts = reboot_hosts(hosts)
        boot_time = datetime.now()
        hosts = wait_reboot(hosts)
        hosts = check_uptime(hosts,
                             maximum=int((datetime.now() -
                                          reboot_time).total_seconds()))
        hosts = wait_puppet_run(hosts, start=boot_time)

    # Check Icinga alarms
    # TODO

    # Run Apache fast test
    if args.apache:
        hosts = run_apache_fast_test(deployment_host, hosts)

    # The repool is *not* done automatically the command to repool is added
    # to the Phabricator task

    # Comment on the Phabricator task
    if args.phab_task_id is not None:
        phabricator_message = get_phabricator_post_message(
            args.hosts, hosts, hosts_status=hosts_status)
        phabricator_task_update(phab_client, args.phab_task_id,
                                phabricator_message)

    logger.info(("Auto reimaging of hosts '{hosts}' completed, hosts "
                 "'{successful}' were successful.").format(hosts=args.hosts,
                                                           successful=hosts))
Example #48
0
def getHostDomain():
    site = ''
    import socket
    site = socket.getfqdn()
    fqdn = site.split('.')
    return fqdn[0], fqdn[-2] + '.' + fqdn[-1]
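
A quick illustration of what the helper above returns; note that it assumes the FQDN contains at least one dot, since a dot-less host name (for example plain 'localhost') would make fqdn[-2] raise an IndexError. The FQDN below is made up:

site = 'build01.example.com'
fqdn = site.split('.')              # ['build01', 'example', 'com']
print(fqdn[0])                      # build01      (short host name)
print(fqdn[-2] + '.' + fqdn[-1])    # example.com  (domain)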
Example #49
0
            rlock.release()

    return newFunction


def checkDiskFull():
    ''' check disk usage and reject request require disk when disk is full '''
    appGlobal = pylons.config['pylons.app_globals']
    if not appGlobal.diskOk:
        msg = 'Cannot complete operation safely, disk is full'
        raise AgentException(Errors.HEALTH_DISKFULL, msg)


ipaddr = getHostIP()
hostname = socket.gethostname()
fqdn = socket.getfqdn()


def trackable():
    ''' logging '''
    def newf(f, self, *args, **kw):
        ''' wrapper '''
        errMsg = None
        startTs = time.time() * 1000

        try:
            result = f(self, *args, **kw)
            return result

        except BaseException as excep:
            errMsg = 'Error status (%s) - %s' % (excep,
Example #50
0
def cmd_listen(workingdir, cert_path):
    cwd = os.getcwd()
    try:
        common.ch_dir(workingdir, logger)
        #just load up the password for later
        read_private(True)

        serveraddr = ('', common.CRL_PORT)
        server = ThreadedCRLServer(serveraddr, CRLHandler)
        if os.path.exists('cacrl.der'):
            logger.info("Loading existing crl: %s" %
                        os.path.abspath("cacrl.der"))
            with open('cacrl.der', 'rb') as f:
                server.setcrl(f.read())
        t = threading.Thread(target=server.serve_forever)
        logger.info("Hosting CRL on %s:%d" %
                    (socket.getfqdn(), common.CRL_PORT))
        t.start()

        def check_expiration():
            logger.info("checking CRL for expiration every hour")
            while True:
                try:
                    if os.path.exists('cacrl.der'):
                        retout = cmd_exec.run(
                            "openssl crl -inform der -in cacrl.der -text -noout",
                            lock=False)['retout']
                        for line in retout:
                            line = line.strip()
                            if line.startswith(b"Next Update:"):
                                expire = datetime.datetime.strptime(
                                    line[13:].decode('utf-8'),
                                    "%b %d %H:%M:%S %Y %Z")
                                # check expiration within 6 hours
                                in1hour = datetime.datetime.utcnow(
                                ) + datetime.timedelta(hours=6)
                                if expire <= in1hour:
                                    logger.info(
                                        "Certificate to expire soon %s, re-issuing"
                                        % expire)
                                    cmd_regencrl(workingdir)
                    # check a little less than every hour
                    time.sleep(3540)

                except KeyboardInterrupt:
                    logger.info("TERM Signal received, shutting down...")
                    #server.shutdown()
                    break

        t2 = threading.Thread(target=check_expiration)
        t2.setDaemon(True)
        t2.start()

        def revoke_callback(revocation):
            json_meta = json.loads(revocation['meta_data'])
            serial = json_meta['cert_serial']
            if revocation.get('type', None) != 'revocation' or serial is None:
                logger.error("Unsupported revocation message: %s" % revocation)
                return

            logger.info("Revoking certificate: %s" % serial)
            server.setcrl(cmd_revoke(workingdir, None, serial))

        try:
            while True:
                try:
                    revocation_notifier.await_notifications(
                        revoke_callback, revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    logger.warning(
                        "No connection to revocation server, retrying in 10s..."
                    )
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            server.shutdown()
            sys.exit()
    finally:
        os.chdir(cwd)
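
The check_expiration thread above keys off the "Next Update:" line in the output of openssl crl -text -noout. A small isolated sketch of that date parsing, using the same slicing and format string as the code above (the sample line is made up):

import datetime

# Example 'Next Update' line as produced by `openssl crl -text -noout`
line = b"Next Update: Mar 10 12:00:00 2031 GMT"
expire = datetime.datetime.strptime(line[13:].decode('utf-8'),
                                    "%b %d %H:%M:%S %Y %Z")
# Re-issue the CRL when it expires within the next 6 hours
in6hours = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
print(expire, expire <= in6hours)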
Example #51
0
def hostip():
    try:
        return socket.gethostbyname(socket.getfqdn(socket.gethostname()))
    except Exception as e:
        return 'Unknown IP'
Example #52
0
import os
import socket


sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_hostname = socket.gethostname()
local_fqdn = socket.getfqdn()
ip_addr = socket.gethostbyname(local_hostname)
print(f'Local hostname: {local_hostname}')
print(f'Local FQDN: {local_fqdn}')
print(f'Ip Address: {ip_addr}')

server_addr = (ip_addr, 23456)
sock.connect(server_addr)


filename = 'testfile.txt'
script_dir = os.path.dirname(__file__)
rel_path = f'receive/{filename}'
abs_file_path = os.path.join(script_dir, rel_path)

with open(abs_file_path, 'wb') as f:
    print('file opened')
    while True:
        print('receiving data')
        data = sock.recv(64)
        print(f'data={data}')
        if not data:
            break
        f.write(data)
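
The script above only receives; it expects something already listening on (ip_addr, 23456) that streams a file and closes the connection when done. A hypothetical sender counterpart, kept symmetrical with the receiver (the file name and chunk size mirror the snippet above):

import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((socket.gethostbyname(socket.gethostname()), 23456))
srv.listen(1)

conn, addr = srv.accept()
print(f'sending to {addr}')
with open('testfile.txt', 'rb') as f:
    while True:
        chunk = f.read(64)
        if not chunk:
            break
        conn.sendall(chunk)
conn.close()   # closing the socket is what ends the receiver's loop
srv.close()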
Example #53
0
            log("SonosDiscovery: Searching for devices using gethostname %s" %
                interfaceAddr)
            if interfaceAddr not in [None, '']:
                sonos_devices = discover(timeout=5,
                                         interface_addr=interfaceAddr)
        except:
            log("SonosDiscovery: Exception when getting devices",
                xbmc.LOGERROR)
            log("SonosDiscovery: %s" % traceback.format_exc(), xbmc.LOGERROR)
            sonos_devices = []

    # If still not found, try yet another method
    if (sonos_devices is None) or (len(sonos_devices) < 1):
        try:
            # Try and find the address ourselves
            interfaceAddr = socket.gethostbyname(socket.getfqdn())
            log("SonosDiscovery: Searching for devices using getfqdn %s" %
                interfaceAddr)
            if interfaceAddr not in [None, '']:
                sonos_devices = discover(timeout=5,
                                         interface_addr=interfaceAddr)
        except:
            log("SonosDiscovery: Exception when getting devices",
                xbmc.LOGERROR)
            log("SonosDiscovery: %s" % traceback.format_exc(), xbmc.LOGERROR)
            sonos_devices = []

    # If still not found, try the last option
    if (sonos_devices is None) or (len(sonos_devices) < 1):
        try:
            # Try and find the address ourselves by going to a web page
Example #54
0
def hostname():
    try:
        return socket.getfqdn(socket.gethostname())
    except Exception as e:
        return 'Unknown host'
Example #55
0
def initialize(context, plan_config, cols_config, data_config,
               aggregator_address, feature_shape):
    """
    Initialize Data Science plan.

    Create a protocol buffer file of the initial model weights for
     the federation.
    """
    plan = Plan.Parse(plan_config_path=Path(plan_config),
                      cols_config_path=Path(cols_config),
                      data_config_path=Path(data_config))

    init_state_path = plan.config['aggregator']['settings']['init_state_path']

    # TODO:  Is this part really needed?  Why would we need to collaborator
    #  name to know the input shape to the model?

    # if  feature_shape is None:
    #     if  cols_config is None:
    #         exit('You must specify either a feature
    #         shape or authorized collaborator
    #         list in order for the script to determine the input layer shape')
    print(plan.cols_data_paths)

    collaborator_cname = list(plan.cols_data_paths)[0]

    # else:

    #     logger.info(f'Using data object of type {type(data)}
    #     and feature shape {feature_shape}')
    #     raise NotImplementedError()

    # data_loader = plan.get_data_loader(collaborator_cname)
    # task_runner = plan.get_task_runner(collaborator_cname)

    data_loader = plan.get_data_loader(collaborator_cname)
    task_runner = plan.get_task_runner(data_loader)
    tensor_pipe = plan.get_tensor_pipe()

    # I believe there is no need for this line as task_runner has this variable
    # initialized with empty dict tensor_dict_split_fn_kwargs =
    # task_runner.tensor_dict_split_fn_kwargs or {}
    tensor_dict, holdout_params = split_tensor_dict_for_holdouts(
        logger, task_runner.get_tensor_dict(False),
        **task_runner.tensor_dict_split_fn_kwargs)

    logger.warn(f'Following parameters omitted from global initial model, '
                f'local initialization will determine'
                f' values: {list(holdout_params.keys())}')

    model_snap = utils.construct_model_proto(tensor_dict=tensor_dict,
                                             round_number=0,
                                             tensor_pipe=tensor_pipe)

    logger.info(f'Creating Initial Weights File    🠆 {init_state_path}')

    utils.dump_proto(model_proto=model_snap, fpath=init_state_path)

    plan_origin = Plan.Parse(Path(plan_config), resolve=False).config

    if (plan_origin['network']['settings']['agg_addr'] == 'auto'
            or aggregator_address):
        plan_origin['network']['settings'] = plan_origin['network'].get(
            'settings', {})
        plan_origin['network']['settings']['agg_addr'] =\
            aggregator_address or getfqdn()

        logger.warn(f"Patching Aggregator Addr in Plan"
                    f" 🠆 {plan_origin['network']['settings']['agg_addr']}")

        Plan.Dump(Path(plan_config), plan_origin)

    plan.config = plan_origin

    # Record that plan with this hash has been initialized
    if 'plans' not in context.obj:
        context.obj['plans'] = []
    context.obj['plans'].append(f"{Path(plan_config).stem}_{plan.hash[:8]}")
    logger.info(f"{context.obj['plans']}")
Example #56
0
 def get_hostname(self):
     hostname = socket.gethostname()
     return socket.getfqdn(hostname)
Example #57
0
def get_address():
    import socket
    host_name = socket.getfqdn(socket.gethostname())
    return socket.gethostbyname(host_name)
Example #58
0
    'SMTP_USE_TLS': False,
    'SQLALCHEMY_DATABASE_URI': None,
    'SQLALCHEMY_MAX_OVERFLOW': 3,
    'SQLALCHEMY_POOL_RECYCLE': 120,
    'SQLALCHEMY_POOL_SIZE': 5,
    'SQLALCHEMY_POOL_TIMEOUT': 10,
    'STATIC_FILE_METHOD': None,
    'STATIC_SITE_STORAGE': None,
    'STORAGE_BACKENDS': {
        'default': 'fs:/opt/indico/archive'
    },
    'STRICT_LATEX': False,
    'SUPPORT_EMAIL': None,
    'TEMP_DIR': '/opt/indico/tmp',
    'USE_PROXY': False,
    'WORKER_NAME': socket.getfqdn(),
    'XELATEX_PATH': None,
}

# Default values for settings that cannot be set in the config file
INTERNAL_DEFAULTS = {
    'CONFIG_PATH': os.devnull,
    'CONFIG_PATH_RESOLVED': None,
    'LOGGING_CONFIG_PATH': None,
    'TESTING': False
}


def get_config_path():
    """Get the path of the indico config file.
Example #59
0
 def jinja_globals():
     return {
         'hostname': socket.getfqdn(),
         'navbar_color': conf.get('webserver', 'NAVBAR_COLOR'),
     }
Example #60
0
    r = requests.get(config_url)

print 'Fetched node info'

config = r.json()

if config['role'] == 'leader':
    #swarm init
    s = client.swarm.init(force_new_cluster=True)

    print 'Swarm initialized'
    
    worker_token = client.swarm.attrs['JoinTokens']['Worker']
    manager_token = client.swarm.attrs['JoinTokens']['Manager']

    ip = "%s:%s" % (socket.gethostbyname(socket.getfqdn()),'2377')
    r = requests.post(info_url, json = {'ip':ip, 'tokens':{'worker':worker_token, 'manager':manager_token}})

    if r.status_code != 200:
        print 'Failed to post swarm info'
        sys.exit(1)

    print 'Posted swarm info'
else:
    #swarm join
    s = client.swarm.join(remote_addrs=list(map((lambda x: x['ip']), config['managers'])), join_token=config['token'])

    print 'Joined swarm'
    
    if config['role'] == 'manager':
        ip = "%s:%s" % (socket.gethostbyname(socket.getfqdn()),'2377')