Example #1
def createEvent(apiHost,
                apiKey,
                fingerprintFields,
                title,
                organizationID,
                source = {'ref': platform.node(),'type': 'host'},
                severity=Event.INFO,
                sender={'ref': platform.node(),'type': 'host'},
                properties=None,
                status=Event.OK,
                tags=None,
                message=None,
                createdAt=None,
                receivedAt=None,
                eventId=None):
    c = EventConnection(apiKey=apiKey,organizationID=organizationID,apiHost=apiHost)
    
    e = Event(source,
              fingerprintFields,
              title,
              organizationID,
              severity,
              sender,
              properties,
              status,
              tags,
              message,
              createdAt,
              receivedAt,
              eventId)
    
    print(c.createEvent(e))
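
A minimal call sketch; the host, key and organization values here are placeholders, not part of the original snippet:

createEvent(apiHost='api.example.com',
            apiKey='API-KEY-PLACEHOLDER',
            fingerprintFields=['@title'],
            title='Disk usage above 90%',
            organizationID='my-org')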
Example #2
	def publish(self):
		text_ap = ["deviceid=" + self.info.deviceid, "features=" + hex(self.info.features), "model=" + self.info.model]
		text_at = ["tp=UDP", "sm=false", "sv=false", "ek=1", "et=0,1", "cn=0,1", "ch=2", "ss=16", "sr=44100", "pw=false", "vn=3", "txtvers=1"]
		bus = dbus.SystemBus()
		server = dbus.Interface(bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)

		self.group = dbus.Interface(bus.get_object(avahi.DBUS_NAME, server.EntryGroupNew()), avahi.DBUS_INTERFACE_ENTRY_GROUP)
		self.group.AddService(
			avahi.IF_UNSPEC,                           # interface: announce on all interfaces
			avahi.PROTO_UNSPEC,                        # protocol: both IPv4 and IPv6
			dbus.UInt32(0),                            # flags
			AIRPLAY_BANNER + platform.node(),          # service name
			"_airplay._tcp",                           # service type
			"",                                        # domain (default)
			"",                                        # host (default)
			dbus.UInt16(AIRPLAY_PORT),                 # port
			avahi.string_array_to_txt_array(text_ap)   # TXT records
		)
		self.group.AddService(
			avahi.IF_UNSPEC,
			avahi.PROTO_UNSPEC,
			dbus.UInt32(0),
			self.info.deviceid + "@" + AIRPLAY_BANNER + platform.node(),
			"_raop._tcp",
			"",
			"",
			dbus.UInt16(AIRTUNES_PORT),
			avahi.string_array_to_txt_array(text_at)
		)
		self.group.Commit()
Example #3
def do_post(path):
  print "Posting..."
  t = Tumblpy("","",
      "","")
  tbuff = textview.get_buffer()
  article_text = ""
  if isWeather.get_active():
    article_text = get_date_desc() + weatherProvider.get_weather()
  article_text = article_text + tbuff.get_text(tbuff.get_start_iter(), tbuff.get_end_iter())
  blog_url = t.post('user/info')
  blog_url = blog_url['user']['blogs'][1]['url']
  if path.get_text() !="No image":
    photo = open(path.get_text(), 'rb')
    ephoto = open(path.get_text(), 'rb')
    tags = "catumblr , "+ platform.node()
    etags = exifread.process_file(ephoto)
    if 'Image Model' in etags:
      tags = "catumblr , "+ platform.node() + ", " + str(etags['Image Model'])
    p_params = {'type':'photo', 'caption': article_text, 'data': photo, 'tags':tags}
    ephoto.close()
  else:
    tags = "catumblr , "+ platform.node()
    time_caption = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    p_params = {'type':'text', 'body': article_text, 'caption': time_caption, 'tags':tags}

  post = t.post('post', blog_url=blog_url, params=p_params)
  print post  # returns id if posted successfully
Example #4
def email_success(log_base=None):
    '''send an email to options.email about a successful build'''
    user = os.getenv("USER")
    if log_base is None:
        log_base = gitroot
    text = '''
Dear Developer,

Your autobuild on %s has succeeded.

''' % platform.node()

    if options.keeplogs:
        text += '''

you can get full logs of all tasks in this job here:

  %s/logs.tar.gz

''' % log_base

    text += '''
The top commit for the tree that was built was:

%s
''' % top_commit_msg

    logs = os.path.join(gitroot, 'logs.tar.gz')
    send_email('autobuild success on %s' % platform.node(),
               text, logs)
Example #5
def test_dont_start_disabled_accounts(db, config, default_account):
    purge_other_accounts(default_account)
    config['SYNC_STEAL_ACCOUNTS'] = True
    default_account.sync_host = None
    default_account.disable_sync(reason='testing')
    db.session.commit()
    ss = SyncService(cpu_id=0, total_cpus=1)
    assert ss.accounts_to_start() == []
    assert default_account.sync_host is None
    assert default_account.sync_should_run is False

    default_account.sync_host = platform.node()
    default_account.disable_sync('testing')
    db.session.commit()
    ss = SyncService(cpu_id=0, total_cpus=1)
    assert ss.accounts_to_start() == []
    assert default_account.sync_should_run is False

    # Invalid Credentials
    default_account.mark_invalid()
    default_account.sync_host = None
    db.session.commit()

    # Don't steal invalid accounts
    ss = SyncService(cpu_id=0, total_cpus=1)
    assert ss.accounts_to_start() == []

    # Don't explicitly start invalid accounts
    default_account.sync_host = platform.node()
    db.session.commit()
    ss = SyncService(cpu_id=0, total_cpus=1)
    assert ss.accounts_to_start() == []
Example #6
	def sendmail_report(self,dirnames,email_type):
		text = str(platform.node())+" Export/Publish Report\n"
		text += "-"*len(text)+'\n'
		for dname in dirnames:
			if email_type == MailerStates.TYPE_OK:
				text += "Conversion succeeded for the directory: %s\n" % dname
			if email_type == MailerStates.TYPE_ERROR:
				text += "Conversion failed for the directory: %s\n" % dname
				
		msg = MIMEText(text)
		if email_type == MailerStates.TYPE_OK:
			msg['Subject'] = '[dPool Elastic Cluster] - Node:'+str(platform.node())+' - Successful Conversion'
		elif email_type == MailerStates.TYPE_ERROR:
			msg['Subject'] = '[dPool Elastic Cluster] - Node:'+str(platform.node())+' - Failed Conversion'
		elif email_type == MailerStates.TYPE_WARNING:
			msg['Subject'] = '[dPool Elastic Cluster] - Node:'+str(platform.node())+' - Warning'
		
		msg['From'] = _SENDER
		try:
			s = smtplib.SMTP(self.smtp_server)
		except Exception:
			self.Log.logger.error("Connection to SMTP Server %s failed." % self.smtp_server)
			return

		for email_user in self.mailing_list:
			del msg['To']  # assigning twice would append a second To: header
			msg['To'] = email_user
			s.sendmail(_SENDER_EMAIL, [email_user], msg.as_string())
		s.quit()
Example #7
    def __init__(self):
        """Load configuration file

        Load json formatted configuration file, backup.json. Exception
        ErrorUnknownConfigOption is raised if unknown configuration options
        are found.
        """
        self.config = json.load(open('../backup.json', 'r'))
        self.servers = []

        try:
            for x in ['frequency', 'log_file', 'log_level', 'verbosity',
                      'daemonize', 'my_name']:
                setattr(self, x, self.config['general'][x])
                del(self.config['general'][x])
            if not self.my_name:
                self.my_name = platform.node()
        except KeyError, e:
            # Set defaults
            if x == 'my_name':
                setattr(self, x, platform.node())
            elif x == 'verbosity':
                setattr(self, x, 0)
            else:
                raise ErrorMissingConfigOption(e)
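
A minimal backup.json sketch matching the keys consumed above (all values hypothetical):

{
    "general": {
        "frequency": 3600,
        "log_file": "/var/log/backup.log",
        "log_level": "INFO",
        "verbosity": 0,
        "daemonize": false,
        "my_name": ""
    }
}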
Example #8
def is_host_allowed():
    from platform   import node
    if 'seaman-' not in node():
        print 'This host can not run selenium,',node()
        return False
    else:
        return True
Example #9
def email_success(elapsed_time, log_base=None):
    '''send an email to options.email about a successful build'''
    user = os.getenv("USER")
    if log_base is None:
        log_base = gitroot
    text = '''
Dear Developer,

Your autobuild on %s has succeeded after %.1f minutes.

''' % (platform.node(), elapsed_time / 60.)

    if options.restrict_tests:
        text += """
The build was restricted to tests matching %s\n""" % options.restrict_tests

    if options.keeplogs:
        text += '''

you can get full logs of all tasks in this job here:

  %s/logs.tar.gz

''' % log_base

    text += '''
The top commit for the tree that was built was:

%s
''' % top_commit_msg

    logs = os.path.join(gitroot, 'logs.tar.gz')
    send_email('autobuild[%s] success on %s' % (options.branch, platform.node()),
               text, logs)
Example #10
    def testCombinedEvent(self):
        testEnv = TestEnvironment()
        broker = testEnv.getBroker()

        topic1 = "test_events_3_%s_%d" % (platform.node(), os.getpid())
        topic2 = "test_events_3a_%s_%d" % (platform.node(), os.getpid())
        combinedTopic = topic1 + "," + topic2
        eventSystem = events.EventSystem.getDefaultEventSystem()
        eventSystem.createReceiver(broker, topic1)
        eventSystem.createReceiver(broker, topic2)
        eventSystem.createTransmitter(broker, combinedTopic)
    
        #
        # send a test event on both topics at once, and have each receiver wait to
        # receive it
        #
        self.sendEvent(combinedTopic)
    
        val = eventSystem.receiveEvent(topic1)
        self.assertIsNotNone(val)
        ps = val.getPropertySet()
        print ps.toString()
    
        val = eventSystem.receiveEvent(topic2)
        self.assertIsNotNone(val)
        ps = val.getPropertySet()
        print ps.toString()
Example #11
	def setUp(self):

		cxm.core.cfg['PATH'] = "tests/stubs/bin/"
		cxm.core.cfg['VMCONF_DIR'] = "tests/stubs/cfg/"
		cxm.core.cfg['QUIET']=True
	#	cxm.core.cfg['DEBUG']=True

		# Dummy mocker
		dummy_mock = Mocker()
		dummy = dummy_mock.mock()
		dummy_mock.replay()

		# Mock node class
		node_mock = Mocker()
		node = node_mock.mock()
		node.Node(platform.node())
		node_mock.result(dummy)
		node_mock.replay()

		cxm.xencluster.node=node

		# Run test
		self.cluster=cxm.xencluster.XenCluster([platform.node()])

		node_mock.verify()
		node_mock.restore()
Example #12
 def get_platform_facts(self):
     self.facts['system'] = platform.system()
     self.facts['kernel'] = platform.release()
     self.facts['machine'] = platform.machine()
     self.facts['python_version'] = platform.python_version()
     self.facts['fqdn'] = socket.getfqdn()
     self.facts['hostname'] = platform.node().split('.')[0]
     self.facts['nodename'] = platform.node()
     self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
     arch_bits = platform.architecture()[0]
     self.facts['userspace_bits'] = arch_bits.replace('bit', '')
     if self.facts['machine'] == 'x86_64':
         self.facts['architecture'] = self.facts['machine']
         if self.facts['userspace_bits'] == '64':
             self.facts['userspace_architecture'] = 'x86_64'
         elif self.facts['userspace_bits'] == '32':
             self.facts['userspace_architecture'] = 'i386'
     elif Facts._I386RE.search(self.facts['machine']):
         self.facts['architecture'] = 'i386'
         if self.facts['userspace_bits'] == '64':
             self.facts['userspace_architecture'] = 'x86_64'
         elif self.facts['userspace_bits'] == '32':
             self.facts['userspace_architecture'] = 'i386'
     else:
         self.facts['architecture'] = self.facts['machine']
     if self.facts['system'] == 'Linux':
         self.get_distribution_facts()
     elif self.facts['system'] == 'AIX':
         rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
         data = out.split('\n')
         self.facts['architecture'] = data[0]
     elif self.facts['system'] == 'OpenBSD':
         self.facts['architecture'] = platform.uname()[5]
Example #13
def run_reports(dry_run=False,
                time_queries=False,
                you="nobody@localhost",
                me="yield@"+platform.node(),
                host=platform.node()
           ):

    try:
        session = Session()

        if time_queries:
            start_time = time.time()

        html = []
        for (report, method) in reports:
            html.extend(method(session))
            if time_queries:
                html.append("<p>%s took: %ld secs</p>" % (report, time.time() - start_time))
                start_time = time.time()

        if len(html) == 0:
            html = ['No status updates to report']

    finally:
        session.close()

    message = header(you=you, me=me, host=host) + "".join(html) + footer()

    if dry_run:
        print message
    else:
        # Send the message via local SMTP server.

        s = smtplib.SMTP('localhost')
        s.sendmail(me, you, message)
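
Note that the me and host defaults are computed once, when the def statement is executed, not on every call. A sketch of the usual idiom for deferring them to call time (behaviour otherwise unchanged):

def run_reports(dry_run=False, time_queries=False,
                you="nobody@localhost", me=None, host=None):
    if me is None:
        me = "yield@" + platform.node()
    if host is None:
        host = platform.node()
    # ... body as above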
Example #14
    def assertValid(self, val, names, origNum, destNum):
        """do a validity check on the values we received."""
        # get only the filterable properties and make sure we only get the names we expect
        fnames = val.getFilterablePropertyNames()
        self.assertEqual(len(fnames), len(names))

        for x in names:
            self.assertTrue(x in fnames)

        # get the whole PropertySet and make sure we only get the names we expect

        props = list(names)
        props.append('myname')
        ps = val.getPropertySet()
        self.assertEqual(ps.nameCount(), len(props))
        for x in names:
            self.assertTrue(ps.exists(x))

        eventsystem = events.EventSystem.getDefaultEventSystem()
        commandEvent = eventsystem.castToCommandEvent(val)

        # check originator values
        orig = commandEvent.getOriginator()
        self.assertEqual(orig.getLocalID(), origNum)
        self.assertEqual(orig.getProcessID(), os.getpid())
        self.assertEqual(orig.getHostName(), platform.node())

        dest = commandEvent.getDestination()

        # check destination values
        self.assertEqual(dest.getLocalID(), destNum)
        self.assertEqual(dest.getProcessID(), os.getpid())
        self.assertEqual(dest.getHostName(), platform.node())
Example #15
def main(args):
    st = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    tee.log('find_cells.py running on',platform.node(),st)

    mkdir_p(args.outdir+'/'+args.substack_id)
    if args.pair_id is None:
        tee.logto('%s/%s/log.txt' % (args.outdir, args.substack_id))
        args.outdir = args.outdir+'/'+args.substack_id
    else:
        tee.logto('%s/%s/log_%s.txt' % (args.outdir, args.substack_id, args.pair_id))
        args.outdir = args.outdir+'/'+args.substack_id+'/'+args.pair_id
        mkdir_p(args.outdir)

    timers = [mscd.pca_analysis_timer, mscd.mean_shift_timer, mscd.ms_timer, mscd.patch_ms_timer]
    timers.extend([volume.save_vaa3d_timer, volume.save_markers_timer])
    timers.extend([threshold.multi_kapur_timer])
    for t in timers:
        t.reset()


    substack = volume.SubStack(args.indir, args.substack_id)
    substack.load_volume(pair_id=args.pair_id)
    if args.local:
        mscd.pms(substack, args)
    else:
        mscd.ms(substack, args)
    for t in timers:
        if t.n_calls > 0:
            tee.log(t)
    st = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    tee.log('find_cells.py finished on',platform.node(),st)
Example #16
def email_success(elapsed_time, log_base=None):
    '''send an email to options.email about a successful build'''
    user = os.getenv("USER")
    if log_base is None:
        log_base = gitroot
    text = '''
Dear Developer,

Your autobuild on {0!s} has succeeded after {1:.1f} minutes.

'''.format(platform.node(), elapsed_time / 60.)

    if options.keeplogs:
        text += '''

you can get full logs of all tasks in this job here:

  {0!s}/logs.tar.gz

'''.format(log_base)

    text += '''
The top commit for the tree that was built was:

{0!s}
'''.format(top_commit_msg)

    logs = os.path.join(gitroot, 'logs.tar.gz')
    send_email('autobuild success on {0!s}'.format(platform.node()),
               text, logs)
Example #17
    def emit(self, record):
        # type: (ExceptionReporter) -> None
        try:
            request = record.request  # type: HttpRequest

            exception_filter = get_exception_reporter_filter(request)

            if record.exc_info:
                stack_trace = ''.join(traceback.format_exception(*record.exc_info))
            else:
                stack_trace = None

            try:
                user_profile = request.user
                user_full_name = user_profile.full_name
                user_email = user_profile.email
            except Exception:
                traceback.print_exc()
                # Error was triggered by an anonymous user.
                user_full_name = None
                user_email = None

            data = request.GET if request.method == 'GET' else \
                exception_filter.get_post_parameters(request)

            report = dict(
                node = platform.node(),
                method = request.method,
                path = request.path,
                data = data,
                remote_addr = request.META.get('REMOTE_ADDR', None),
                query_string = request.META.get('QUERY_STRING', None),
                server_name = request.META.get('SERVER_NAME', None),
                message = record.getMessage(),
                stack_trace = stack_trace,
                user_full_name = user_full_name,
                user_email = user_email,
            )
        except Exception:
            traceback.print_exc()
            report = dict(
                node = platform.node(),
                message = record.getMessage(),
            )

        try:
            if settings.STAGING_ERROR_NOTIFICATIONS:
                # On staging, process the report directly so it can happen inside this
                # try/except to prevent looping
                from zilencer.error_notify import notify_server_error
                notify_server_error(report)
            else:
                queue_json_publish('error_reports', dict(
                    type = "server",
                    report = report,
                ), lambda x: None)
        except Exception:
            # If this breaks, complain loudly but don't pass the traceback up the stream
            # However, we *don't* want to use logging.exception since that could trigger a loop.
            logging.warning("Reporting an exception triggered an exception!", exc_info=True)
Example #18
def info():
    jsondata = '"os":{'
    jsondata += '"system":"'
    jsondata += platform.system()
    jsondata += '","network-name":"'
    jsondata += platform.node()
    jsondata += '","release":"'
    jsondata += platform.release()
    jsondata += '","version":"'
    jsondata += platform.version()
    jsondata += '","arch":"'
    jsondata += platform.machine()
    jsondata += '","boot-time":'
    jsondata += str(psutil.boot_time())
    jsondata += ','
    jsondata += usersInfo()
    jsondata += '}'

    return jsondata
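
A sketch of the same payload built with a dict and json.dumps, which handles quoting automatically; it returns a complete JSON object rather than the bare fragment, and usersInfo() (which emits a raw JSON fragment in the original) is omitted here:

def info():
    data = {'system': platform.system(),
            'network-name': platform.node(),
            'release': platform.release(),
            'version': platform.version(),
            'arch': platform.machine(),
            'boot-time': psutil.boot_time()}
    return json.dumps({'os': data})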
Example #19
def email_failure(status, failed_task, failed_stage, failed_tag, errstr,
                  elapsed_time, log_base=None, add_log_tail=True):
    '''send an email to options.email about the failure'''
    elapsed_minutes = elapsed_time / 60.0
    user = os.getenv("USER")
    if log_base is None:
        log_base = gitroot
    text = '''
Dear Developer,

Your autobuild on %s failed after %.1f minutes
when trying to test %s with the following error:

   %s

the autobuild has been abandoned. Please fix the error and resubmit.

A summary of the autobuild process is here:

  %s/autobuild.log
''' % (platform.node(), elapsed_minutes, failed_task, errstr, log_base)

    if failed_task != 'rebase':
        text += '''
You can see logs of the failed task here:

  %s/%s.stdout
  %s/%s.stderr

or you can get full logs of all tasks in this job here:

  %s/logs.tar.gz

The top commit for the tree that was built was:

%s

''' % (log_base, failed_tag, log_base, failed_tag, log_base, top_commit_msg)

    if add_log_tail:
        f = open("%s/%s.stdout" % (gitroot, failed_tag), 'r')
        lines = f.readlines()
        log_tail = "".join(lines[-50:])
        num_lines = len(lines)
        if num_lines < 50:
            # Also include stderr (compile failures) if < 50 lines of stdout
            f = open("%s/%s.stderr" % (gitroot, failed_tag), 'r')
            log_tail += "".join(f.readlines()[-(50-num_lines):])

        text += '''
The last 50 lines of log messages:

%s
    ''' % log_tail
        f.close()

    logs = os.path.join(gitroot, 'logs.tar.gz')
    send_email('autobuild failure on %s for task %s during %s'
               % (platform.node(), failed_task, failed_stage),
               text, logs)
Example #20
	def test_get_local_node(self):
		node = self.mocker.mock()
		node.get_hostname()
		self.mocker.result(platform.node())
		self.mocker.replay()
		self.cluster.nodes={platform.node(): node}

		self.assertEqual(self.cluster.get_local_node().get_hostname(),platform.node())
Example #21
def main():

    # Command line argument parsing
    parser = argparse.ArgumentParser(description="ADP Restarter")
    parser.add_argument("-v", "--version", action="version", version="%(prog)s 1.0")
    parser.add_argument("-d", dest="dir", type=str, default="/dados/xml/data", help="Directory to monitor")
    parser.add_argument("-p", dest="pattern", type=str, default="", help="File match pattern")
    parser.add_argument("-c", dest="crit", type=int, default=400, help="Restart threshold")
    parser.add_argument("-f", dest="logfile", type=str, default="logs.zip", help="Log archive")
    args = parser.parse_args(sys.argv[1:])

    count = 0
    for root, path, files in os.walk(args.dir):
        for file in files:
            if re.match(args.pattern, file):
                count += 1

    if count > args.crit:

        zf = zipfile.ZipFile("/tmp/" + args.logfile, mode="w")
        zf.write("/var/log/m2m-adapter.log", compress_type=zipfile.ZIP_DEFLATED)
        zf.write("/var/log/syslog", compress_type=zipfile.ZIP_DEFLATED)
        zf.write("/var/log/mongos.log", compress_type=zipfile.ZIP_DEFLATED)
        zf.close()

        dest = [
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
        ]

        msg = MIMEMultipart()
        msg["From"] = "*****@*****.**"
        msg["To"] = "M2M"
        msg["Date"] = formatdate(localtime=True)
        msg["Subject"] = "ADP-RESTARTER - " + platform.node()
        msg.attach(MIMEText("O M2M Adapter em " + platform.node() + "foi reiniciado por acumulo de xmls"))

        attach = open("/tmp/" + args.logfile, "rb")
        part = MIMEBase("application", "octet-stream")
        part.set_payload(attach.read())
        encoders.encode_base64(part)
        part.add_header("Content-Disposition", 'attachment; filename="' + args.logfile + '"')
        msg.attach(part)

        try:
            server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
            server.ehlo()
            server.login(msg["From"], "m2msolutions")
            server.sendmail(msg["From"], dest, str(msg))
            server.close()
            print "successfully sent the mail"
        except Exception, e:
            print e

        subprocess.call(["/etc/init.d/m2m-adapter restart"], shell=True)
Example #22
def copr_permissions_applier_change(username, coprname):
    copr = coprs_logic.CoprsLogic.get(flask.g.user, username, coprname).first()
    permission = coprs_logic.CoprPermissionsLogic.get(
        flask.g.user, copr, flask.g.user).first()
    applier_permissions_form = \
        forms.PermissionsApplierFormFactory.create_form_cls(permission)()

    if not copr:
        return page_not_found(
            "Project with name {0} does not exist.".format(coprname))

    if copr.owner == flask.g.user:
        flask.flash("Owner cannot request permissions for his own project.")
    elif applier_permissions_form.validate_on_submit():
        # we rely on these to be 0 or 1 from form. TODO: abstract from that
        if permission is not None:
            old_builder = permission.copr_builder
            old_admin = permission.copr_admin
        else:
            old_builder = 0
            old_admin = 0
        new_builder = applier_permissions_form.copr_builder.data
        new_admin = applier_permissions_form.copr_admin.data
        coprs_logic.CoprPermissionsLogic.update_permissions_by_applier(
            flask.g.user, copr, permission, new_builder, new_admin)
        db.session.commit()
        flask.flash(
            "Successfuly updated permissions for project '{0}'."
            .format(copr.name))
        admin_mails = [copr.owner.mail]
        for perm in copr.copr_permissions:
            # a value of 2 means the user's admin status is approved
            if perm.copr_admin == 2:
                admin_mails.append(perm.user.mail)

        # sending emails
        if flask.current_app.config.get("SEND_EMAILS", False):
            for mail in admin_mails:
                msg = MIMEText(
                    "{6} is asking for these permissions:\n\n"
                    "Builder: {0} -> {1}\nAdmin: {2} -> {3}\n\n"
                    "Project: {4}\nOwner: {5}".format(
                        helpers.PermissionEnum(old_builder),
                        helpers.PermissionEnum(new_builder),
                        helpers.PermissionEnum(old_admin),
                        helpers.PermissionEnum(new_admin),
                        copr.name, copr.owner.name, flask.g.user.name))

                msg["Subject"] = "[Copr] {0}: {1} is asking permissons".format(copr.name, flask.g.user.name)
                msg["From"] = "root@{0}".format(platform.node())
                msg["To"] = mail
                s = smtplib.SMTP("localhost")
                s.sendmail("root@{0}".format(platform.node()), mail, msg.as_string())
                s.quit()

    return flask.redirect(flask.url_for("coprs_ns.copr_detail",
                                        username=copr.owner.name,
                                        coprname=copr.name))
Example #23
def get_my_info():
    """ Return general information about this node
    """
    result = {}
    result['host_name'] = platform.node()
    result['real_host_name'] = platform.node()
    result['dist'] = platform.dist()
    result['nago_version'] = nago.get_version()
    return result
Example #24
File: testhost.py Project: afajl/sy
def get_info():
    import platform
    try:
        return hosts[platform.node()]
    except KeyError:
        raise RuntimeError(
            'This OS instance "%s" is not configured as a test host, ' % platform.node() 
            + 'run with "nosetest -a \'!host\' tests" or add this ' 
            + 'host to "%s"' % __file__)
Example #25
def discover_conduit():
    if 'CONDUIT' in os.environ:
        return os.environ['CONDUIT']
    elif platform.node().startswith('daint'):
        return 'aries'
    elif platform.node().startswith('excalibur'):
        return 'aries'
    else:
        raise Exception('Please set CONDUIT in your environment')
Example #26
 def __init__(self,jobID=None,verbose=False,MPIrank=None):
     """Initialises the object and creates the pool file (unless the file
     has already been created by another process)
     
     Keyword Args:
     
     jobID -- a unique identifier for the current job so that if a previous
     pool file has not been cleaned up then the current job knows to just
     overwrite it.
     verbose -- set to True for debugging output
     MPIrank -- If python is handling the MPI initialisation then we can just
     use the mpi rank rather than the IDpool checkout system.
     """
     fname = 'IDpool-{0}.log'.format(jobID)
     if MPIrank is not None:
         print 'Using MPIrank as job ID number:', MPIrank
         self.ID = MPIrank
         print 'checkpoint idpool 1'
         f = open(fname,'w')
         f.write('{0} - {1}\n'.format(self.ID,platform.node()))
         #f.write('{0} - {1}\n'.format(self.ID,'test'))
         print 'checkpoint idpool 2'
     else:
         print 'No MPIrank received, initialising IDpool file'
         #use the IDpool checkout system as a backup
         if verbose: print 'START'
         lock = Lock(fname,timeout=120,step=1)   #lock the filename before we even try to open it, to prevent other processes sneaking in and accessing it in the gap we would leave otherwise
         lock.lock(force=False)
         if verbose: print 'Lock acquired'
         try:
             f = open(fname,'r+')     #try to open file, will fail if it does not already exist
         except IOError, e:  #if file fails to open, make sure it is because it doesn't exist and then create it
             if e.errno == errno.ENOENT:
                 f = open(fname,'w')
                 if verbose: print 'Starting new log file'
                 #Start a new log file
                 f.write('{0}\n'.format(jobID))                 # first line is the jobID
                 f.write('0 - {0}\n'.format(platform.node()))   # checkout line, an integer and the name of the node which checked it out
                 self.ID = 0 #check out the first ID and exit
                 f.close()   #close the file
                 lock.unlock()   #release the lock
                 return  #we are done!
                 
         #if the file already exists, loop to the end to determine the next number to log out
         for i,line in enumerate(f):    #string will be empty only when EOF is reached
             thisline = line
         # end loop: we just let the loop finish because we only care about the last line of the file to figure
         # out what the next ID should be
         # print 'HERE100: '+thisline
         l = thisline.partition(' - ')       #split the last line of the file into the ID, the separator and the node name
         if verbose: print l
         self.ID = int(l[0]) + 1         #take the last ID checked out, add 1, and set this as the ID for the current process.
         # write a new line to file recording the newly checked out ID
         f.write('{0} - {1}\n'.format(self.ID,platform.node()))
         f.close()       #close file
         lock.unlock()   #release the lock
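
Under these assumptions the pool file written above ends up looking like this: the jobID on the first line, then one "<ID> - <hostname>" line per checkout (hostnames hypothetical):

myjob42
0 - node-a.example.com
1 - node-b.example.com
2 - node-a.example.com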
Example #27
def geisysteminfo():
    """"""
    print platform.system()
    print platform.version()
    print platform.architecture()
    print platform.node()
    print platform.java_ver()
    print platform.dist()
    print platform.python_version()
    print platform.win32_ver()
Example #28
    def collect(self, module=None, collected_facts=None):
        platform_facts = {}
        # platform.system() can be Linux, Darwin, Java, or Windows
        platform_facts['system'] = platform.system()
        platform_facts['kernel'] = platform.release()
        platform_facts['machine'] = platform.machine()

        platform_facts['python_version'] = platform.python_version()

        platform_facts['fqdn'] = socket.getfqdn()
        platform_facts['hostname'] = platform.node().split('.')[0]
        platform_facts['nodename'] = platform.node()

        platform_facts['domain'] = '.'.join(platform_facts['fqdn'].split('.')[1:])

        arch_bits = platform.architecture()[0]

        platform_facts['userspace_bits'] = arch_bits.replace('bit', '')
        if platform_facts['machine'] == 'x86_64':
            platform_facts['architecture'] = platform_facts['machine']
            if platform_facts['userspace_bits'] == '64':
                platform_facts['userspace_architecture'] = 'x86_64'
            elif platform_facts['userspace_bits'] == '32':
                platform_facts['userspace_architecture'] = 'i386'
        elif solaris_i86_re.search(platform_facts['machine']):
            platform_facts['architecture'] = 'i386'
            if platform_facts['userspace_bits'] == '64':
                platform_facts['userspace_architecture'] = 'x86_64'
            elif platform_facts['userspace_bits'] == '32':
                platform_facts['userspace_architecture'] = 'i386'
        else:
            platform_facts['architecture'] = platform_facts['machine']

        if platform_facts['system'] == 'AIX':
            # Attempt to use getconf to figure out architecture
            # fall back to bootinfo if needed
            getconf_bin = module.get_bin_path('getconf')
            if getconf_bin:
                rc, out, err = module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
                data = out.splitlines()
                platform_facts['architecture'] = data[0]
            else:
                bootinfo_bin = module.get_bin_path('bootinfo')
                rc, out, err = module.run_command([bootinfo_bin, '-p'])
                data = out.splitlines()
                platform_facts['architecture'] = data[0]
        elif platform_facts['system'] == 'OpenBSD':
            platform_facts['architecture'] = platform.uname()[5]

        machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
        if machine_id:
            machine_id = machine_id.splitlines()[0]
            platform_facts["machine_id"] = machine_id

        return platform_facts
Example #29
def email_failure(status, failed_task, failed_stage, failed_tag, errstr, elapsed_time, log_base=None):
    """send an email to options.email about the failure"""
    elapsed_minutes = elapsed_time / 60.0
    user = os.getenv("USER")
    if log_base is None:
        log_base = gitroot
    text = """
Dear Developer,

Your autobuild on %s failed after %.1f minutes
when trying to test %s with the following error:

   %s

the autobuild has been abandoned. Please fix the error and resubmit.

A summary of the autobuild process is here:

  %s/autobuild.log
""" % (
        platform.node(),
        elapsed_minutes,
        failed_task,
        errstr,
        log_base,
    )

    if failed_task != "rebase":
        text += """
You can see logs of the failed task here:

  %s/%s.stdout
  %s/%s.stderr

or you can get full logs of all tasks in this job here:

  %s/logs.tar.gz

The top commit for the tree that was built was:

%s

""" % (
            log_base,
            failed_tag,
            log_base,
            failed_tag,
            log_base,
            top_commit_msg,
        )

    logs = os.path.join(gitroot, "logs.tar.gz")
    send_email(
        "autobuild failure on %s for task %s during %s" % (platform.node(), failed_task, failed_stage), text, logs
    )
Example #30
def get_caffe_path():
    if platform.node() == 'HUB':  # UNI computer
        return '/home/hayden/caffe-recurrent/'
    elif platform.node() == 'HUB-HOME':  # HOME computer
        return '/home/hayden/caffe-master/'
    elif 'adelaide.edu.au' in platform.node():
        print 'NO CAFFE ON CLUSTER'
        return '/home/a1211517/'
    else:
        print 'ERROR: NOT A RECOGNISED PC'
        return None
Example #31
 def _check_header(uri, headers=None, query_params=None):
     useragent = headers['X-Contrail-Useragent']
     hostname = platform.node()
     self.assertThat(useragent, Contains(hostname))
     return (200, json.dumps({}))
Example #32
def get_platform_info():
    """
    Return a dictionary containing information about the system platform.

    Returns
    -------
    platform_info : dict
        Various information about the system platform.

    See Also
    --------
    platform : The Python module used to query information about the system.

    Notes
    -----
    The returned dictionary has the following keys:

    * system
    * node
    * release
    * version
    * processor
    * python
    * libc
    * linux
    * mac_os
    * win32
    * status

    The linux/mac_os/win32 entries are "empty" if they are not applicable.

    If the processor information returned by `platform` is "empty", a query of
    `lscpu` is attempted in order to provide the necessary information.

    The status entry informs about the success of the queries. It has one of
    the following values:

    * 'All OK' (everything seems to be OK)
    * 'Used lscpu in processor query' (`lscpu` was used)
    * 'Processor query failed' (failed to get processor information)

    """

    platform_info = {
        'system': json.dumps(platform.system()),
        'node': json.dumps(platform.node()),
        'release': json.dumps(platform.release()),
        'version': json.dumps(platform.version()),
        'machine': json.dumps(platform.machine()),
        'processor': json.dumps(platform.processor()),
        'python': json.dumps(platform.python_version()),
        'libc': json.dumps(platform.libc_ver()),
        'linux': json.dumps(platform.linux_distribution()),
        'mac_os': json.dumps(platform.mac_ver()),
        'win32': json.dumps(platform.win32_ver()),
        'status': 'All OK'
    }

    # Note: the value was json.dumps'd above, so an "empty" processor
    # string is '""' rather than ''.
    if platform_info['processor'] == json.dumps(''):
        try:
            platform_info['processor'] = str(
                subprocess.check_output(['lscpu']).decode())
            platform_info['status'] = 'Used lscpu in processor query'

        except (subprocess.CalledProcessError, OSError):
            platform_info['status'] = 'Processor query failed'

    return platform_info
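
A quick usage note: every value except status comes back JSON-encoded by the dumps calls above, so callers must decode it again, e.g.:

info = get_platform_info()
hostname = json.loads(info['node'])  # bare hostname string, quotes stripped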
Example #33
def get_app_name(hostname):
    """
    Get the app name from the host name.

    The hostname in our deployments will be in the form `bedrock-{version}-{type}-{random-ID}`
    where {version} is "dev", "stage", or "prod", and {type} is the process type
    (e.g. "web" or "clock"). Everywhere else it won't be in this form and will return None.
    """
    if hostname.startswith('bedrock-'):
        app_mode = hostname.split('-')[1]
        return 'bedrock-' + app_mode

    return None
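# Hypothetical examples:
#   get_app_name('bedrock-prod-web-abc123')  -> 'bedrock-prod'
#   get_app_name('my-laptop')                -> None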


HOSTNAME = platform.node()
APP_NAME = get_app_name(HOSTNAME)
CLUSTER_NAME = config('CLUSTER_NAME', default='')
ENABLE_HOSTNAME_MIDDLEWARE = config('ENABLE_HOSTNAME_MIDDLEWARE',
                                    default=str(bool(APP_NAME)),
                                    parser=bool)
ENABLE_VARY_NOCACHE_MIDDLEWARE = config('ENABLE_VARY_NOCACHE_MIDDLEWARE',
                                        default='true',
                                        parser=bool)
# set this to enable basic auth for the entire site
# e.g. BASIC_AUTH_CREDS="thedude:thewalrus"
BASIC_AUTH_CREDS = config('BASIC_AUTH_CREDS', default='')

MIDDLEWARE_CLASSES = [
    'allow_cidr.middleware.AllowCIDRMiddleware',
    'django.middleware.security.SecurityMiddleware',
Example #34
def email_failure(status,
                  failed_task,
                  failed_stage,
                  failed_tag,
                  errstr,
                  elapsed_time,
                  log_base=None,
                  add_log_tail=True):
    '''send an email to options.email about the failure'''
    elapsed_minutes = elapsed_time / 60.0
    user = os.getenv("USER")
    if log_base is None:
        log_base = gitroot
    text = '''
Dear Developer,

Your autobuild on %s failed after %.1f minutes
when trying to test %s with the following error:

   %s

the autobuild has been abandoned. Please fix the error and resubmit.

A summary of the autobuild process is here:

  %s/autobuild.log
''' % (platform.node(), elapsed_minutes, failed_task, errstr, log_base)

    if options.restrict_tests:
        text += """
The build was restricted to tests matching %s\n""" % options.restrict_tests

    if failed_task != 'rebase':
        text += '''
You can see logs of the failed task here:

  %s/%s.stdout
  %s/%s.stderr

or you can get full logs of all tasks in this job here:

  %s/logs.tar.gz

The top commit for the tree that was built was:

%s

''' % (log_base, failed_tag, log_base, failed_tag, log_base, top_commit_msg)

    if add_log_tail:
        f = open("%s/%s.stdout" % (gitroot, failed_tag), 'r')
        lines = f.readlines()
        log_tail = "".join(lines[-50:])
        num_lines = len(lines)
        if num_lines < 50:
            # Also include stderr (compile failures) if < 50 lines of stdout
            f = open("%s/%s.stderr" % (gitroot, failed_tag), 'r')
            log_tail += "".join(f.readlines()[-(50 - num_lines):])

        text += '''
The last 50 lines of log messages:

%s
    ''' % log_tail
        f.close()

    logs = os.path.join(gitroot, 'logs.tar.gz')
    send_email(
        'autobuild[%s] failure on %s for task %s during %s' %
        (options.branch, platform.node(), failed_task, failed_stage), text,
        logs)
Example #35
    def train(self):
        """Runs one logical iteration of training.

        Calls ``step()`` internally. Subclasses should override ``step()``
        instead to return results.
        This method automatically fills the following fields in the result:

            `done` (bool): training is terminated. Filled only if not provided.

            `time_this_iter_s` (float): Time in seconds this iteration
            took to run. This may be overridden in order to override the
            system-computed time difference.

            `time_total_s` (float): Accumulated time in seconds for this
            entire experiment.

            `experiment_id` (str): Unique string identifier
            for this experiment. This id is preserved
            across checkpoint / restore calls.

            `training_iteration` (int): The index of this
            training iteration, e.g. call to train(). This is incremented
            after `step()` is called.

            `pid` (str): The pid of the training process.

            `date` (str): A formatted date of when the result was processed.

            `timestamp` (str): A UNIX timestamp of when the result
            was processed.

            `hostname` (str): Hostname of the machine hosting the training
            process.

            `node_ip` (str): Node ip of the machine hosting the training
            process.

        Returns:
            A dict that describes training progress.
        """
        start = time.time()
        result = self.step()
        assert isinstance(result, dict), "step() needs to return a dict."

        # We do not modify internal state nor update this result if duplicate.
        if RESULT_DUPLICATE in result:
            return result

        result = result.copy()

        self._iteration += 1
        self._iterations_since_restore += 1

        if result.get(TIME_THIS_ITER_S) is not None:
            time_this_iter = result[TIME_THIS_ITER_S]
        else:
            time_this_iter = time.time() - start
        self._time_total += time_this_iter
        self._time_since_restore += time_this_iter

        result.setdefault(DONE, False)

        # self._timesteps_total should only be tracked if increments provided
        if result.get(TIMESTEPS_THIS_ITER) is not None:
            if self._timesteps_total is None:
                self._timesteps_total = 0
            self._timesteps_total += result[TIMESTEPS_THIS_ITER]
            self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]

        # self._episodes_total should only be tracked if increments provided
        if result.get(EPISODES_THIS_ITER) is not None:
            if self._episodes_total is None:
                self._episodes_total = 0
            self._episodes_total += result[EPISODES_THIS_ITER]

        # self._timesteps_total should not override user-provided total
        result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
        result.setdefault(EPISODES_TOTAL, self._episodes_total)
        result.setdefault(TRAINING_ITERATION, self._iteration)

        # Provides auto-filled neg_mean_loss for avoiding regressions
        if result.get("mean_loss"):
            result.setdefault("neg_mean_loss", -result["mean_loss"])

        now = datetime.today()
        result.update(experiment_id=self._experiment_id,
                      date=now.strftime("%Y-%m-%d_%H-%M-%S"),
                      timestamp=int(time.mktime(now.timetuple())),
                      time_this_iter_s=time_this_iter,
                      time_total_s=self._time_total,
                      pid=os.getpid(),
                      hostname=platform.node(),
                      node_ip=self._local_ip,
                      config=self.config,
                      time_since_restore=self._time_since_restore,
                      timesteps_since_restore=self._timesteps_since_restore,
                      iterations_since_restore=self._iterations_since_restore)

        monitor_data = self._monitor.get_data()
        if monitor_data:
            result.update(monitor_data)

        self.log_result(result)

        if self._stdout_context:
            self._stdout_stream.flush()
        if self._stderr_context:
            self._stderr_stream.flush()

        return result
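
A minimal subclass sketch, grounded in the docstring above (the import path assumes Ray Tune's public API):

from ray.tune import Trainable

class MyTrainable(Trainable):
    def step(self):
        # One logical iteration; train() wraps this and auto-fills
        # hostname, pid, timing and iteration fields in the result.
        return {"mean_loss": 0.1}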
Example #36
 def handle(self):
     data = self.request.recv(1024)
     print("CGSync server: ", self.client_address, "-", data)
     self.request.sendall("Hi from cgsync server " + platform.node())
Example #37
 def handle(self):
     data = self.request[0]
     socket = self.request[1]
     print("Discovery server: ", self.client_address, "-", data)
     socket.sendto("Hi from discovery server " + platform.node(),
                   self.client_address)
Example #38
 def add_origin_info(self):
     self.add_header('Origin-Machine-Name', platform.node())
     self.add_header('Origin-Software-Name', 'gntp.py')
     self.add_header('Origin-Software-Version', __version__)
     self.add_header('Origin-Platform-Name', platform.system())
     self.add_header('Origin-Platform-Version', platform.platform())
Example #39
formatter = logging.Formatter(
    '%(asctime)s.%(msecs)03d %(levelname)s %(message)s',
    datefmt='%Y-%m-%dT%H:%M:%S')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
level = config.get('logging', 'level')
if level == "DEBUG":
    logger.setLevel(logging.DEBUG)
elif level == "ERROR":
    logger.setLevel(logging.ERROR)
else:
    logger.setLevel(logging.INFO)
get_versions_for = ast.literal_eval(config.get('versions', 'versions_for'))
output_dict = {}
cert_entry = list()
dict_added_items = {}
host = platform.node()
mounts = ast.literal_eval(config.get('system', 'mounts'))
timeout = ast.literal_eval(config.get('health', 'timeout'))
counter = ast.literal_eval(config.get('health', 'counter'))
ports = ast.literal_eval(config.get('health', 'ports'))
app_name = ast.literal_eval(config.get('health', 'application'))
app_log_files = ast.literal_eval(config.get('health', 'logfile'))
app_health_string = ast.literal_eval(config.get('health', 'search_string'))
docker_socket_uri = ast.literal_eval(config.get('docker', 'docker_socket_uri'))
whitelist_containers = ast.literal_eval(
    config.get('docker', 'whitelist_containers'))
cert_path = ast.literal_eval(config.get('cert', 'cert_path'))
threshold_days = 30


def __init__(self):
Example #40
def get_system_info() -> dict:
    """ Return summary info for system running benchmark
    Returns
    -------
    dict
        Dictionary containing the following system information:
        * ``host_name`` (str): name of machine
        * ``op_sys`` (str): operating system
        * ``python`` (str): path to python (which conda/virtual environment)
        * ``device`` (tuple): (device type (``'GPU'`` or ``'CPU'``), device information)
        * ``freeze`` (list): list of installed packages and versions
        * ``python_version`` (str): python version
        * ``git_hash`` (str, None): If installed from git repository, hash of HEAD commit
        * ``dlclive_version`` (str): dlclive version from :data:`dlclive.VERSION`
    """

    # get os

    op_sys = platform.platform()
    host_name = platform.node().replace(" ", "")

    # A string giving the absolute path of the executable binary for the Python interpreter, on systems where this makes sense.
    if platform.system() == "Windows":
        host_python = sys.executable.split(os.path.sep)[-2]
    else:
        host_python = sys.executable.split(os.path.sep)[-3]

    # try to get git hash if possible
    dlc_basedir = os.path.dirname(os.path.dirname(dlcfile))
    git_hash = None
    try:
        git_hash = subprocess.check_output(["git", "rev-parse", "HEAD"],
                                           cwd=dlc_basedir)
        git_hash = git_hash.decode("utf-8").rstrip("\n")
    except subprocess.CalledProcessError:
        # not installed from git repo, eg. pypi
        # fine, pass quietly
        pass

    # get device info (GPU or CPU)
    dev = None
    if tf.test.is_gpu_available():
        gpu_name = tf.test.gpu_device_name()
        from tensorflow.python.client import device_lib

        dev_desc = [
            d.physical_device_desc for d in device_lib.list_local_devices()
            if d.name == gpu_name
        ]
        dev = [d.split(",")[1].split(":")[1].strip() for d in dev_desc]
        dev_type = "GPU"
    else:
        from cpuinfo import get_cpu_info

        dev = [get_cpu_info()["brand"]]
        dev_type = "CPU"

    return {
        "host_name": host_name,
        "op_sys": op_sys,
        "python": host_python,
        "device_type": dev_type,
        "device": dev,
        # pip freeze to get versions of all packages
        "freeze": list(freeze.freeze()),
        "python_version": sys.version,
        "git_hash": git_hash,
        "dlclive_version": VERSION,
    }
Example #41
#!/home/bv/bvenv/bin/python
"""
This script will set the database name for backup.
"""
import datetime
import logging
import os
import platform
from lib import my_env
from lib.my_env import run_script

cfg = my_env.init_env("bellavista", __file__)
dbname = "{host}_{date}.db".format(host=platform.node(), date=datetime.datetime.now().strftime("%Y%m%d"))
os.environ["LOCALDB"] = dbname
(fp, filename) = os.path.split(__file__)
for script in ["rebuild_sqlite.py", "murcs_Get.py"]:
    logging.info("Run script: {s}".format(s=script))
    run_script(fp, script)
Example #42
################################################
# Client Config - Verify with C2 Server config
################################################
SERVER = argv[1]
PORT = int(argv[2])
AGENT_PAGE = "/main.css"
SLEEP_TIME1 = 2
SLEEP_TIME2 = 5
SECRET_KEY = '000000000000000116s92k48dss923j640s234v849c2001qi231d950g3s9df01esdr'
SSL_VERSION = ssl.PROTOCOL_TLSv1_2
KILL_DATE = datetime(2022, 6, 11)

################################################
# Client Default Info
################################################
HOSTNAME = node().strip()
OS_VERSION = system().strip() + release().strip()
PID = getpid()
TYPE = "py"
PROTOCOL = "HTTPS"


################################################
# Client Request to C2
################################################
def cmd_formatter(send_data):
    # Put data in proper base64 format (really confusing)
    return b64encode(send_data.encode('utf-8')).decode('utf-8')


def request_headers(send_data):
Example #43
def tst_host():
    """Test session's short hostname value"""
    return platform.node().split('.')[0]
Example #44
# encoding:utf-8
import os.path
import platform
import logging
import os

if platform.node() == "JZLOG":  # JZLOG is the name of felinx' dedicated server.
    DEBUG = False
else:
    DEBUG = True
TEMPLATE_DEBUG = DEBUG

SITE_SRC_ROOT = os.path.dirname(__file__)

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_SRC_ROOT, 'static/')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

try:
    # Secret settings
Example #45
def main(command_line_arguments):
    args = get_args(command_line_arguments)

    os.makedirs(args.outpile_folder, exist_ok=True)
    os.makedirs(args.results_folder, exist_ok=True)
    os.makedirs(args.processing_folder, exist_ok=True)

    machine_process_folder_name = platform.node() + '_P' + str(os.getpid())
    processing_folder = os.path.join(args.processing_folder,
                                     machine_process_folder_name)
    print(f'Creating processing folder "{processing_folder}"')
    os.makedirs(processing_folder, exist_ok=False)

    while True:
        # 1. Scan 'inpile' folder; if no files, exit
        geojson_files = glob.glob(os.path.join(args.inpile_folder,
                                               '*.geojson'))
        if len(geojson_files) == 0:
            print('No files found to process. Exiting...')
            return 0

        # 2. move a file to 'processing/machinename-processid'
        geojson_file_name = geojson_files[0]

        base_geojson_file_name = os.path.basename(geojson_file_name)
        target_geojson_file_name = os.path.join(args.processing_folder,
                                                machine_process_folder_name,
                                                base_geojson_file_name)
        try:
            # from https://docs.python.org/3/library/os.html
            # "...If successful, the renaming will be an atomic operation (this is a POSIX requirement)..."
            os.rename(geojson_file_name, target_geojson_file_name)

        #    on failure, go to 1
        except OSError:
            print(
                f'Failed to allocate "{geojson_file_name}", trying another...')
            continue

        # 3. process file in 'processing/machinename-processid'
        try:
            print(f'Processing "{geojson_file_name}"...')
            # analyse_polygons.main([...
            arguments = [
                'python',
                os.path.join('emeraldenv', 'analyse_polygons.py'),

                # 4. output results in 'results' folder
                '--output-folder',
                args.results_folder,
                '--index',
                args.index,
                '--primary-cache-size',
                args.primary_cache_size,
                '--loader',
                args.loader,
                '--verbose',
                # '-fng', '10',
                target_geojson_file_name
            ]
            print(f'Running: {arguments}')
            subprocess.run(arguments,
                           check=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
            print(f'Processing "{geojson_file_name}"... complete')
            print()

        except subprocess.CalledProcessError as e:
            print('ERROR Failed to execute')

            inpile_file_name = os.path.join(args.inpile_folder,
                                            base_geojson_file_name)
            print(
                f'geojson file moved from "{target_geojson_file_name}" back to "{inpile_file_name}"'
            )
            os.rename(target_geojson_file_name, inpile_file_name)

            print(f'Removing processing folder "{processing_folder}"')
            os.rmdir(processing_folder)

            error_file_name = os.path.join(
                args.outpile_folder, '__ERROR-' + machine_process_folder_name +
                '-' + os.path.splitext(base_geojson_file_name)[0] + '.txt')
            print(f'Reporting error to {error_file_name}')
            with open(error_file_name, 'w') as f:
                print(f'Command = "{e.cmd}"', file=f)
                print(file=f)

                stdout = ''
                if e.output is not None:
                    stdout = e.output.decode('ascii')
                print('==================== stdout ==================', file=f)
                print(stdout, file=f)
                print(file=f)

                stderr = ''
                if e.stderr is not None:
                    stderr = e.stderr.decode('ascii')
                print('==================== stderr ==================', file=f)
                print(stderr, file=f)

            print('Exiting...')
            exit(-1)

        # 5. move file from 'processing/machinename-processid' to 'outpile'
        outpile_file_name = os.path.join(args.outpile_folder,
                                         base_geojson_file_name)
        os.rename(target_geojson_file_name, outpile_file_name)

    print(f'Removing processing folder "{processing_folder}"')
    os.rmdir(processing_folder)
Example #46
 def get_node(self):
     """
     Return the node name (hostname).
     """
     return platform.node()
Example #47
# Django settings for djangotest project.

import platform
PRODUCTION_MODE = platform.node().startswith('http')

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (('benoitc', '*****@*****.**'), )

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

TIME_ZONE = 'America/Chicago'

LANGUAGE_CODE = 'en-us'

SITE_ID = 1

USE_I18N = True

USE_L10N = True

MEDIA_ROOT = ''
Example #48
import os
import sys
from logging.handlers import SysLogHandler


def get_logger_config(log_dir,
                      logging_env="no_env",
                      edx_filename="edx.log",
                      dev_env=False,
                      syslog_addr=None,
                      debug=False,
                      local_loglevel='INFO',
                      service_variant='xqueue'):
    """

    Return the appropriate logging config dictionary. You should assign the
    result of this to the LOGGING var in your settings. The reason it's done
    this way instead of registering directly is because I didn't want to worry
    about resetting the logging state if this is called multiple times when
    settings are extended.

    If dev_env is set to true logging will not be done via local rsyslogd,
    instead, application logs will be dropped in log_dir.

    "edx_filename" is ignored unless dev_env is set to true since otherwise logging is handled by rsyslogd.

    """

    # Revert to INFO if an invalid string is passed in
    if local_loglevel not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
        local_loglevel = 'INFO'

    hostname = platform.node().split(".")[0]
    syslog_format = ("[service_variant={service_variant}]"
                     "[%(name)s][env:{logging_env}] %(levelname)s "
                     "[{hostname}  %(process)d] [%(filename)s:%(lineno)d] "
                     "- %(message)s").format(service_variant=service_variant,
                                             logging_env=logging_env,
                                             hostname=hostname)

    handlers = ['console', 'local'] if debug else ['local']
    if syslog_addr:
        handlers.append('syslogger-remote')

    logger_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format':
                '%(asctime)s %(levelname)s %(process)d '
                '[%(name)s] %(filename)s:%(lineno)d - %(message)s',
            },
            'syslog_format': {
                'format': syslog_format
            },
            'raw': {
                'format': '%(message)s'
            },
        },
        'handlers': {
            'console': {
                'level': 'DEBUG' if debug else 'INFO',
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
                'stream': sys.stdout,
            },
        },
        'loggers': {
            '': {
                'handlers': handlers,
                'level': 'DEBUG',
                'propagate': False
            },
            'pika': {
                'handlers': handlers,
                'level': 'WARNING',
                'propagate': True,
            }
        }
    }

    if syslog_addr:
        logger_config['handlers'].update({
            'syslogger-remote': {
                'level': 'INFO',
                'class': 'logging.handlers.SysLogHandler',
                'address': syslog_addr,
                'formatter': 'syslog_format',
            },
        })

    if dev_env:
        edx_file_loc = os.path.join(log_dir, edx_filename)
        logger_config['handlers'].update({
            'local': {
                'class': 'logging.handlers.RotatingFileHandler',
                'level': local_loglevel,
                'formatter': 'standard',
                'filename': edx_file_loc,
                'maxBytes': 1024 * 1024 * 2,
                'backupCount': 5,
            },
        })
    else:
        logger_config['handlers'].update({
            'local': {
                'level': local_loglevel,
                'class': 'logging.handlers.SysLogHandler',
                'address': '/dev/log',
                'formatter': 'syslog_format',
                'facility': SysLogHandler.LOG_LOCAL0,
            },
            'tracking': {
                'level': 'DEBUG',
                'class': 'logging.handlers.SysLogHandler',
                'address': '/dev/log',
                'facility': SysLogHandler.LOG_LOCAL1,
                'formatter': 'raw',
            },
        })

    return logger_config
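
# A hedged usage sketch for the function above; the log directory and flag
# values are made up. dictConfig is the standard way to register the result.
import logging
import logging.config
import platform

LOGGING = get_logger_config('/var/tmp/log', dev_env=True, debug=True)
logging.config.dictConfig(LOGGING)
logging.getLogger(__name__).info('logging configured on %s', platform.node())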
Example #49
    email_failure(status,
                  failed_task,
                  failed_stage,
                  failed_tag,
                  errstr,
                  elapsed_time,
                  log_base=options.log_base)
else:
    elapsed_minutes = elapsed_time / 60.0
    print '''

####################################################################

AUTOBUILD FAILURE

Your autobuild[%s] on %s failed after %.1f minutes
when trying to test %s with the following error:

   %s

the autobuild has been abandoned. Please fix the error and resubmit.

####################################################################

''' % (options.branch, platform.node(), elapsed_minutes, failed_task, errstr)

cleanup()
print(errstr)
print("Logs in logs.tar.gz")
sys.exit(status)
Example #50
def gethostname(x):
    import platform
    import time
    time.sleep(0.01)
    return x + (platform.node(), )
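
# The tuple-append shape above suggests a parallel map where each worker tags
# its payload with the host it ran on; a sketch using the stdlib thread pool,
# which is an assumption about the original call site.
from multiprocessing.pool import ThreadPool

with ThreadPool(4) as pool:
    results = pool.map(gethostname, [(i,) for i in range(4)])
print(results)  # e.g. [(0, 'myhost'), (1, 'myhost'), (2, 'myhost'), ...]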
Example #51
#!/usr/bin/python
# Merge a wildcarded set of junit XML files into a single XML file,
# renaming the test suite to match the source XML file
#
# (C) 2016 Niall Douglas http://www.nedproductions.biz/
# File created: July 2016

from __future__ import print_function
import os, sys, glob, platform, datetime
import xml.etree.ElementTree as ET

hostname = platform.node()
timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
properties = ET.Element('properties')


def add_property(name, value):
    global properties
    prop = ET.Element('property', attrib={'name': name, 'value': value})
    properties.append(prop)


add_property('os.name', platform.system())
add_property('os.platform', platform.processor())
add_property('os.release', platform.release())
add_property('os.version', platform.version())
# Is there a file called CMakeCXXCompiler.cmake anywhere in a CMakeFiles?
if os.path.exists('CMakeFiles'):
    files = glob.glob('CMakeFiles/*/CMakeCXXCompiler.cmake')
    if len(files) > 0:
        with open(files[0], 'rt') as ih:
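
# The excerpt is cut off above. As a hedged sketch of where such a merger
# typically ends up (element and file names assumed, not from the original):
# load each input file, rename its suite after the file, attach the collected
# properties, and write one combined document.
import copy

merged = ET.Element('testsuites')
for path in glob.glob('*.junit.xml'):  # illustrative wildcard
    suite = ET.parse(path).getroot()
    suite.set('name', os.path.splitext(os.path.basename(path))[0])
    suite.insert(0, copy.deepcopy(properties))
    merged.append(suite)
ET.ElementTree(merged).write('merged_junit.xml', encoding='utf-8')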
Example #52
import gzip
import platform
from time import perf_counter, sleep, time

import h5py
import numpy as np
import pandas as pd

# nb_particles_dict, implementations, path_base_repo, t_sleep_before,
# nb_cpus, run and get_time_as_str are defined elsewhere in the source file.


def run_benchmarks(nb_particles_short, time_julia_bench):

    nb_particles = nb_particles_dict[nb_particles_short]

    def create_command(command_template, nb_particles_short, t_end):
        return command_template.format(
            nb_particles_short=nb_particles_short,
            t_end=t_end,
            nb_particles=nb_particles,
        )

    print(
        f"First run to evaluate t_end from time_julia_bench={time_julia_bench}"
    )
    t_end = 0.08
    if nb_particles_short == "16k":
        t_end = 0.04

    name_dir, command_template = implementations["pythran high-level jit"]
    working_dir = path_base_repo / name_dir
    command = create_command(command_template, nb_particles_short, t_end)

    t_perf_start = perf_counter()
    run(command, working_dir)
    elapsed_time = perf_counter() - t_perf_start

    t_end = t_end * time_julia_bench / elapsed_time
    print(f"We'll run the benchmarks with t_end = {t_end}")

    timestamp_before = time()
    time_as_str = get_time_as_str()

    lines = []
    index_run = 0

    nb_loops = 2
    for i_loop in range(nb_loops):
        print(f"--- Running all benchmarks ({i_loop+1}/{nb_loops}) ---")
        for implementation, (
                name_dir,
                command_template,
        ) in implementations.items():
            working_dir = path_base_repo / name_dir

            # warmup
            for _ in range(1):
                command = create_command(command_template, nb_particles_short,
                                         0.004)
                run(command, working_dir)

            command = create_command(command_template, nb_particles_short,
                                     t_end)
            sleep(2)

            t_perf_start = perf_counter()
            timestamp_start = time()
            sleep(t_sleep_before)
            elapsed_time = perf_counter() - t_perf_start
            timestamp_end = time()

            sleep(2)

            lines.append([
                "sleep(t_sleep_before)",
                name_dir,
                index_run,
                timestamp_start,
                timestamp_end,
                elapsed_time,
            ])

            t_perf_start = perf_counter()
            timestamp_start = time()
            run(command, working_dir)
            elapsed_time = perf_counter() - t_perf_start
            timestamp_end = time()
            print(f"elapsed time: {elapsed_time:.3f} s")

            sleep(2)

            lines.append([
                implementation,
                name_dir,
                index_run,
                timestamp_start,
                timestamp_end,
                elapsed_time,
            ])
            index_run += 1

    columns = (
        "implementation language index timestamp_start timestamp_end elapsed_time"
    ).split()

    df = pd.DataFrame(
        lines,
        columns=columns,
    )

    df.sort_values("implementation", inplace=True)

    elapsed_pythran = df[df.implementation == "pythran"]["elapsed_time"].min()
    df["ratio_elapsed"] = df["elapsed_time"] / elapsed_pythran

    print(df)

    node = platform.node()

    path_dir_result = path_base_repo / "power/tmp"
    path_dir_result.mkdir(exist_ok=True)
    path_result = (path_dir_result /
                   f"{nb_particles_short}_{node}_{time_as_str}.csv")
    df.to_csv(path_result)

    if "grid5000" not in node:
        return

    from getwatt import getwatt

    timestamp_end = time()
    node_shortname = node.split(".")[0]
    try:
        conso = np.array(
            getwatt(node_shortname, timestamp_before, timestamp_end))
    except gzip.BadGzipFile:
        print("Error gzip.BadGzipFile. "
              "Power data will need to be upload later.")
        error_BadGzipFile = True
        path_result = path_result.with_name(path_result.stem + "_incomplete" +
                                            ".h5")
    else:
        error_BadGzipFile = False
        path_result = path_result.with_suffix(".h5")

    with h5py.File(str(path_result), "w") as file:
        file.attrs["t_end"] = t_end
        file.attrs["node"] = node
        file.attrs["node_shortname"] = node_shortname
        file.attrs["nb_particles_short"] = nb_particles_short
        file.attrs["time_julia_bench"] = time_julia_bench
        file.attrs["t_sleep_before"] = t_sleep_before
        file.attrs["nb_cpus"] = nb_cpus
        file.attrs["timestamp_before"] = timestamp_before
        file.attrs["timestamp_end"] = timestamp_end

    if error_BadGzipFile:
        return

    times = conso[:, 0]
    watts = conso[:, 1]
    with h5py.File(str(path_result), "a") as file:
        file.create_dataset("times",
                            data=times,
                            compression="gzip",
                            compression_opts=9)
        file.create_dataset("watts",
                            data=watts,
                            compression="gzip",
                            compression_opts=9)

    print(f"File {path_result} saved")
Example #53
        t_song_cindex = song_idx(dbh, t_song)
        #print('::::::: t_song_cindex = ', t_song_cindex )
        if t_song_cindex is not None:
            arr_index = arr_idx(dbh, t_song_cindex[0])
            if arr_index is not None:
                push_song_setlist(dbh, set_num, gig_idx, place_in_list,
                                  arr_index[0])
        else:
            print(t_song, ' not in database')
        place_in_list += 1


if __name__ == "__main__":
    import platform
    import sqlite3

    if platform.node() == "5CB345145G":  # work PC
        music_db = '/scratch/setlist/music_db.sqlite'
    else:
        music_db = '/Users/sisyp/Documents/SONA/scripts/music_db.sqlite'
    mdbh = sqlite3.connect(music_db)
    mc = mdbh.cursor()

    gig_file = input('Name of gig file> ')
    the_gig = read_gig_file(gig_file)
    gig_index = gig_idx(mc, the_gig[0], the_gig[1])  # don't shadow the function
    if gig_index is None:
        gig_index = create_gig(mc, the_gig[0], the_gig[1])
        print('index for the gig is ', gig_index)
    else:
        print(the_gig[0], ' on ', the_gig[1], ' is already in the database')
Example #54
#!/usr/bin/env python3

import platform
from PyQt5.QtCore import *
from config import *
if platform.machine() == "armv7l" and platform.node() == "raspberrypi":
    import RPi.GPIO as GPIO
    import PCF8591 as ADC
    import time
    from neopixel import *
    import argparse

# LED strip configuration:
LED_COUNT = 16  # Number of LED pixels.
LED_PIN = 18  # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN        = 10      # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000  # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10  # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255  # Set to 0 for darkest and 255 for brightest
LED_INVERT = False  # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0  # set to '1' for GPIOs 13, 19, 41, 45 or 53


class AnalogDetection(QObject):
    # pwm = 8, AD = 2,3
    COLOR_R = 0
    COLOR_G = 1
    COLOR_B = 2
    KEY_UP = 1
    KEY_DOWN = 0
    LED_ON = 1  # GPIO.HIGH
Example #55
def is_facet_srv():
    nodename = platform.node()
    return nodename[0:-2] == 'facet-srv'
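
# The [0:-2] slice assumes a fixed two-character suffix such as
# 'facet-srv01'; a quick illustration with made-up hostnames.
for name in ('facet-srv01', 'facet-srv7', 'facet-srv123'):
    print(name, name[0:-2] == 'facet-srv')
# facet-srv01 True, facet-srv7 False, facet-srv123 False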
Example #56
CACHE_PREFIX = 'kuma:'
CACHE_COUNT_TIMEOUT = 60  # seconds

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'TIMEOUT': 60,
        'KEY_PREFIX': 'kuma',
    }
}

# Addresses email comes from
DEFAULT_FROM_EMAIL = '*****@*****.**'
SERVER_EMAIL = '*****@*****.**'

PLATFORM_NAME = platform.node()

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Pacific'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'

# Supported languages
SUMO_LANGUAGES = (
    'ak',
Example #57
def get_platform():
    return platform.node()
Example #58
 def hostname():
     return platform.node()
Example #59
import itertools
import os
import platform
import shlex
import time
from copy import deepcopy
from functools import reduce
from subprocess import PIPE

import yaml


def run_benchmark(env,
                  config,
                  run_path='runs',
                  run_id=None,
                  commands=None,
                  overrides=[],
                  suite='benchmark',
                  dry_run=False,
                  progress="undefined"):
    if not run_id:
        run_id = str(time.time())
    os.makedirs('{}/{}/{}/{}/'.format(run_path, run_id, suite, env.name),
                exist_ok=True)

    with open(config) as f:
        bench = yaml.safe_load(f)

    # Make sure we at least have an empty vars list
    if 'variables' not in bench:
        bench['variables'] = {}

    # If any command is given directly as the command value, change it to dict
    for cmd, cmdc in bench['commands'].items():
        if type(cmdc) is not dict:
            bench['commands'][cmd] = {'command': cmdc}

    # Evaluate configuration overrides
    for override in overrides:
        bench['variables'].update(override.get('variables', {}))
        for cmd, cmdc in override.get('commands', {}).items():
            # Allow just a string
            if type(cmdc) is str:
                if cmdc.strip() == 'drop':
                    del bench['commands'][cmd]
                    continue
                cmdc = {'command': cmdc}
            if cmd in bench['commands']:
                # Change an already existent command config
                if 'variables' in bench['commands'][cmd]:
                    bench['commands'][cmd]['variables'].update(
                        cmdc.pop('variables', {}))
                bench['commands'][cmd].update(cmdc)
            else:
                # Add a new command config
                bench['commands'][cmd] = cmdc

    ncommands = len(bench['commands'])
    for i, endpoint in enumerate(bench['commands']):
        progress_inside = '{} ({}/{}) in {}'.format(endpoint, i + 1, ncommands,
                                                    progress)
        print('### Running benchmark endpoint', progress_inside)

        if commands is not None and endpoint not in commands:
            print('[skipped this endpoint]')
            continue

        var_matrix = deepcopy(bench['variables'])

        cmd = bench['commands'][endpoint]

        if type(cmd) is dict:
            var_matrix.update(cmd.get('variables', {}))
            cmd = cmd['command']

        var_matrix = {
            k:
            (var_matrix[k] if type(var_matrix[k]) is list else [var_matrix[k]])
            for k in var_matrix
        }
        var_matrix = {k: [str(i) for i in var_matrix[k]] for k in var_matrix}

        # Allow running e.g. 'python -m ibench'
        if type(cmd) is str:
            cmd = shlex.split(cmd)

        if dry_run:
            cmd = ["echo", "\#skipped:"] + cmd

        # Cartesian product of arguments
        keys = var_matrix.keys()
        vals = var_matrix.values()
        product_length = reduce(lambda x, y: x * len(y), vals, 1)
        for i, values in enumerate(itertools.product(*vals)):
            print('## Running combination {}/{} of {}'.format(
                i + 1, product_length, progress_inside))
            arg_run = dict(zip(keys, values))
            for k, v in arg_run.items():
                print(f'# {k} = {v}')

            # add automatically generated variables
            arg_run['env_name'] = env.name
            arg_run['hostname'] = platform.node()

            data = ''
            with env.call(cmd, env=arg_run, stdout=PIPE) as proc:
                for line in iter(proc.stdout.readline, b''):
                    data += line.decode()
                    print(line.decode(), end='')
            print("")

            # output to file
            output_prefix = '{}/{}/{}/{}/{}_{}.out'.format(
                run_path, run_id, suite, env.name, time.time(), endpoint)
            with open(output_prefix, 'w') as fd:
                fd.write(data)

            # output the environment we created as well.
            with open(output_prefix + '.meta', 'w') as fd:
                yaml.dump(arg_run, fd)
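
# A minimal config sketch in the shape the loader above expects, written as
# the dict yaml.safe_load would return; all names and values are made up.
# Variable lists are expanded as a cartesian product, and each combination is
# exported to the command as environment variables (env=arg_run above).
bench_example = {
    'variables': {'SIZE': [1000, 10000]},
    'commands': {
        # plain-string form, normalised to {'command': ...} by the loader
        'numpy': 'python -m ibench run -b dot',
        # dict form with per-command variables
        'custom': {'command': './bench.sh',
                   'variables': {'THREADS': [1, 4]}},
    },
}

# An override may replace variables, add commands, or drop one entirely:
override_example = {'commands': {'numpy': 'drop'}}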
Example #60
def _check():
    """Try to detect current device."""

    # TODO: turn platform IDs to proper constants

    # qrc is currently used only on Android, so if we are running with
    # qrc, we are on Android
    if qrc.is_qrc:
        return "android"

    try:
        import platform
        if platform.node() == "Sailfish":
            return "jolla"
    except Exception:
        log.exception(
            "the Python stdlib platform module is apparently unusable on this platform"
        )

    # check CPU architecture
    import subprocess

    proc = subprocess.Popen([
        'uname',
        '-m',
    ],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    arch = proc.communicate()[0].decode("utf-8")
    if ("i686" in arch) or ("x86_64" in arch):
        log.info("* PC detected")
        return "pc"  # we are most probably on a PC
    if sys.platform == "qnx6":
        log.info("* BlackBerry 10 device detected")
        return "bb10"

    # check procFS
    if os.path.exists("/proc/cpuinfo"):
        f = open("/proc/cpuinfo", "r")
        cpuinfo = f.read()
        f.close()
        if "Nokia RX-51" in cpuinfo:  # N900
            log.info("* Nokia N900 detected")
            return "n900"
        # N9 and N950 share the same device module
        elif "Nokia RM-680" in cpuinfo:  # N950
            log.info("* Nokia N950 detected")
            return "n9"
        elif "Nokia RM-696" in cpuinfo:  # N9
            log.info("* Nokia N9 detected")
            return "n9"
        elif "GTA02" in cpuinfo:  # N9
            log.info("* Neo FreeRunner GTA02 detected")
            return "neo"

    # check lsb_release
    try:
        proc = subprocess.Popen(['lsb_release', '-s', '-i'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        distributionId = proc.communicate()[0].decode("utf-8").lower().strip()
        log.info("lsb_release distro id: %s ", distributionId)
        # import pdb; pdb.set_trace()
        if distributionId == 'mer':
            # TODO: could be also Nemo mobile or other Mer based distro,
            # we should probably discern those two in the future
            log.info("* Jolla (or other Mer based device) detected")
            return "jolla"
    except Exception:
        log.exception("running lsb_release during platform detection failed")

    return None
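
# A hedged usage sketch: callers presumably fall back to a generic profile
# when detection returns None; the fallback value here is an assumption.
device_id = _check() or "pc"
log.info("using device profile: %s", device_id)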