Example No. 1
def gr_fix_all():
    print os.getpid(), 'fixing'
    c = 0
    #filter for missing self in match master
    for art in Article.objects.iterator():
        #fix wrong years
        for cat in art.categories.iterator():
            if 'births' in cat.name:
                yr = re.search(r'\d+', cat.name)
                if yr is not None:
                    yr = yr.group()
                    art.birth = int(yr)
                    art.save()

            if 'deaths' in cat.name:
                yr = re.search(r'\d+', cat.name)
                if yr is not None:
                    yr = int(yr.group())
                    art.death = yr
                    art.save()        

        #fix self reference
        art.match_master = art
        art.save()

        c+=1
        if c % 100 == 0:
            print c, os.getpid(), 'fix all'
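
A quick standalone check of the year-extraction idea above (the category names here are made up):

import re

for name in ('1879 births', '1955 deaths', 'Living people'):
    m = re.search(r'\d+', name)
    if m is not None:
        print(name, '->', int(m.group()))  # e.g. "1879 births -> 1879"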
Example No. 2
def get_now_python_process():
    """
    Return the current Python process.
    """

    logger.debug("now process id: {}\n".format(os.getpid()))
    return psutil.Process(os.getpid())
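
A hedged usage sketch of the helper above; memory_info() is a standard psutil.Process method:

import os
import psutil

proc = psutil.Process(os.getpid())
print(proc.pid, proc.memory_info().rss)  # pid and resident set size in bytes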
Example No. 3
def run_net((k,theta,T,g_inh_,spike_delay)):
    seed(int(os.getpid()*time.time()))
    print os.getpid()
    reinit()
    reinit_default_clock()
    clear(True)
    gc.collect()
    
    PKJ = PurkinjeCellGroup(1)
    PKJ.V = PKJ.El
    
    spikes = SpikeMonitor(PKJ)
    spikes.last_spike = None
    V_monitor = StateMonitor(PKJ,'V',record=0)
    ginh_monitor = StateMonitor(PKJ, 'g_inh', record=0)
    
    @network_operation(Clock(dt=defaultclock.dt))
    def random_current():
        PKJ.I = gamma(k,theta,size=len(PKJ)) * nA
        
    @network_operation(Clock(dt=defaultclock.dt))
    def trigger_spike():
        if spikes.spiketimes[0].shape[0] > 0:
            spikes.last_spike = spikes.spiketimes[0][-1]*second
        if spikes.last_spike is not None:
            if abs(defaultclock.t - (spikes.last_spike + spike_delay)) < .000001*ms:
                PKJ.g_inh = g_inh_
        
    run(T)

    V_monitor.insert_spikes(spikes)
    first_isi = diff(spikes.spiketimes[0])[0]
    
    return V_monitor.getvalues(), first_isi, spikes.spiketimes
Example No. 4
File: server.py  Project: befks/odoo
    def process_work(self):
        rpc_request = logging.getLogger('odoo.netsvc.rpc.request')
        rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
        _logger.debug("WorkerCron (%s) polling for jobs", self.pid)
        db_names = self._db_list()
        if len(db_names):
            self.db_index = (self.db_index + 1) % len(db_names)
            db_name = db_names[self.db_index]
            self.setproctitle(db_name)
            if rpc_request_flag:
                start_time = time.time()
                start_rss, start_vms = memory_info(psutil.Process(os.getpid()))

            import odoo.addons.base as base
            base.ir.ir_cron.ir_cron._acquire_job(db_name)
            odoo.modules.registry.Registry.delete(db_name)

            # dont keep cursors in multi database mode
            if len(db_names) > 1:
                odoo.sql_db.close_db(db_name)
            if rpc_request_flag:
                run_time = time.time() - start_time
                end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
                vms_diff = (end_vms - start_vms) / 1024
                logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
                    (db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
                _logger.debug("WorkerCron (%s) %s", self.pid, logline)

            self.request_count += 1
            if self.request_count >= self.request_max and self.request_max < len(db_names):
                _logger.error("There are more dabatases to process than allowed "
                              "by the `limit_request` configuration variable: %s more.",
                              len(db_names) - self.request_max)
        else:
            self.db_index = 0
Example No. 5
    def start(self):
        try:
            runtime.logger.info('Server starting...')
            # start services
            runtime.logger.info('Starting services...')
            services.start()
        except Exception as e:
            runtime.logger.error(e, exc_info=True)
            # stop services
            services.stop()
            raise e

        # start asyncore thread
        self._asyn_thread = Thread(target=self._async_loop,
                                   name='Asyncore thread')
        self._asyn_thread.start()

        self.running = True

        # record process id
        pidfile = open(PID_FILE, "w")
        if os.name == 'posix':
            pidfile.write(str(os.getpgid(os.getpid())))
        else:
            pidfile.write(str(os.getpid()))
        pidfile.close()

        runtime.logger.info('Porcupine Server started successfully')
        print('Porcupine Server v%s' % __version__)
        python_version = 'Python %s' % sys.version
        runtime.logger.info(python_version)
        print(python_version)
        print('''Porcupine comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under
certain conditions; See COPYING for more details.''')
Example No. 6
    def __enter__(self):
        """ Get a session cookie to use for future requests. """

        self._entry_dir = os.path.join(querymod.configuration['repo_path'], str(self._uuid))
        if not os.path.exists(self._entry_dir) and not self._initialize:
            raise querymod.RequestError('No deposition with that ID exists!')
        try:
            if self._initialize:
                self._repo = Repo.init(self._entry_dir)
                with open(self._lock_path, "w") as f:
                    f.write(str(os.getpid()))
                self._repo.config_writer().set_value("user", "name", "BMRBDep").release()
                self._repo.config_writer().set_value("user", "email", "*****@*****.**").release()
                os.mkdir(os.path.join(self._entry_dir, 'data_files'))
            else:
                counter = 100
                while os.path.exists(self._lock_path):
                    counter -= 1
                    time.sleep(random.random())
                    if counter <= 0:
                        raise querymod.ServerError('Could not acquire entry directory lock.')
                with open(self._lock_path, "w") as f:
                    f.write(str(os.getpid()))
                self._repo = Repo(self._entry_dir)
        except NoSuchPathError:
            raise querymod.RequestError("'%s' is not a valid deposition ID." % self._uuid,
                                        status_code=404)

        return self
Example No. 7
def parsemsg(sock, msgparts):
    """Parse a message and act on commands"""
    if msgparts[0] == 'PING':
        content = msgparts[1]
        sock.send(('PONG ' + content + '\r\n').encode())
    elif msgparts[1] == 'PRIVMSG':
        logmsg(' '.join(msgparts))
        message = bot_logic.Message(msgparts)
    elif msgparts[1] == '321':  #list start reply
        logmsg(' '.join(msgparts))
        server = msgparts[0][1:]
        f = open('channels/{pid}'.format(pid=os.getpid()),'w')
        f.write('Channels on server: {serv}\n'.format(serv = server))
        f.write('Channel:    Users:    Topic:\n')
        f.close()
    elif msgparts[1] == '322':  #list data reply
        logmsg(' '.join(msgparts))
        channel = msgparts[3] + '; ' +  msgparts[4] + '; ' + ' '.join(msgparts[5:])[1:] + '\n'
        f = open('channels/{pid}'.format(pid=os.getpid()),'a')
        f.write(channel)
        f.close()
    elif msgparts[1] == '323':  #list end reply
        logmsg(' '.join(msgparts))
        f = open('channels/{pid}'.format(pid=os.getpid()),'r')
        data = f.read()
        f.close()
        print(data)
Example No. 8
 def handle(self):
     data = self.request.recv(BUF_SIZE)
     current_process_id = os.getpid()
     response = '%s: %s' %(current_process_id, data)
     print "PID: %s Server sending response [current_process_id: data] = [%s]" %(os.getpid(), response)
     self.request.send(response)
     return
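
The handler above looks like it belongs to a forking socket server; a minimal, self-contained Python 3 sketch of that wiring (BUF_SIZE and the port are assumptions, not from the original project):

import os
import socketserver

BUF_SIZE = 1024

class PidEchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        data = self.request.recv(BUF_SIZE)
        # prefix the reply with the serving process id, as in the example above
        self.request.send(b'%d: %s' % (os.getpid(), data))

server = socketserver.ForkingTCPServer(('localhost', 9999), PidEchoHandler)  # POSIX only
# server.serve_forever()  # left commented so the sketch is import-safe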
Example No. 9
    def _start(self):
        self.pre_start()
        self.verbose_output.write('Starting service...\n')
        if self.pid:
            print('PID %s exists; process already running (clear %s if this is wrong)\n' % (self.pid, self.lockfile))
            return
        
        context = contextlib.ExitStack()
        if self.fork:
            context = daemon.DaemonContext(files_preserve=[self.verbose_output])
        
        if self.executable_str:
            # Use Popen to run the string command
            self.verbose_output.write("String command: %s\n" % self.executable_str)
            self.verbose_output.write("Run to look like command: %s\n" % self.prog_name)
            cmd = [self.executable_str] + self.exec_args
            executable = None
            if self.prog_name:
                cmd[0] = self.prog_name
                executable = self.executable_str

            proc = subprocess.Popen(cmd, executable=executable)
            self.pid = proc.pid
            self.verbose_output.write('Service started. %s\n' % self.pid)
        else:
            # Use daemon context to run the Python command
            self.verbose_output.write('Running %s(%s).\n' % (self.executable_py, self.exec_args))
            with context:
                print(os.getpid())
                self.pid = os.getpid()
                self.executable_py(*self.exec_args)
        self.post_start()
Example No. 10
def main():
    print 'Process (%s) start...' % os.getpid()
    pid = os.fork()
    if pid==0:
        print 'I am child process (%s) and my parent is %s.' % (os.getpid(), os.getppid())
    else:
        print 'I (%s) just created a child process (%s).' % (os.getpid(), pid)
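
One caveat: the parent above never reaps its child, which leaves a zombie until the parent exits. A sketch of the same flow with os.waitpid:

import os

pid = os.fork()
if pid == 0:
    print('child %s, parent %s' % (os.getpid(), os.getppid()))
    os._exit(0)  # exit the child without running the parent's cleanup handlers
else:
    os.waitpid(pid, 0)  # reap the child so no zombie lingers
    print('parent %s reaped child %s' % (os.getpid(), pid))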
Example No. 11
    def release(self):
        """Release the sandbox for further use

        Unlocks and releases the jail for reuse by others.
        Must be called exactly once after Jail.is_alive == False.

        """
        if self.is_alive:
            raise SandboxError("Sandbox released while still alive")
        if not self.locked:
            raise SandboxError("Attempt to release jail that is already unlocked")
        if os.system("sudo umount %s" % (os.path.join(self.base_dir, "root"),)):
            raise SandboxError("Error returned from umount of jail %d"
                    % (self.number,))
        lock_dir = os.path.join(self.base_dir, "locked")
        pid_filename = os.path.join(lock_dir, "lock.pid")
        with open(pid_filename, 'r') as pid_file:
            lock_pid = int(pid_file.read())
            if lock_pid != os.getpid():
                # if we ever get here something has gone seriously wrong
                # most likely the jail locking mechanism has failed
                raise SandboxError("Jail released by different pid, name %s, lock_pid %d, release_pid %d"
                        % (self.name, lock_pid, os.getpid()))
        os.unlink(pid_filename)
        os.rmdir(lock_dir)
        self.locked = False
Example No. 12
def producer(global_conf_file, queueIn, queueProducer):
    print "[producer-pid({}): log] Started a producer worker at {}".format(os.getpid(), get_now())
    sys.stdout.flush()
    random_sleep()
    print "[producer-pid({}): log] Producer worker ready at {}".format(os.getpid(), get_now())
    queueProducer.put("Producer ready")
    update_count = 0
    while True:
        try:
            start_get_batch = time.time()
            random_sleep()
            if update_count<=max_update_count:
                update_id = "update_{}".format(update_count)
                str_list_sha1s = ["sha1_{}".format(x) for x in range(10)]
                valid_sha1s = str_list_sha1s
                update_count += 1
                print "[producer-pid({}): log] Got batch in {}s at {}".format(os.getpid(), time.time() - start_get_batch, get_now())
                sys.stdout.flush()
            else:
                print "[producer-pid({}): log] No more update to process.".format(os.getpid())
                return end_producer(queueIn)
            start_precomp = time.time()
            print "[producer-pid({}): log] Pushing update {} at {}.".format(os.getpid(), update_id, get_now())
            sys.stdout.flush()
            queueIn.put((update_id, valid_sha1s, start_precomp))
            print "[producer-pid({}): log] Pushed update {} to queueIn at {}.".format(os.getpid(), update_id, get_now())
            sys.stdout.flush()
        except Exception as inst:
            print "[producer-pid({}): error] Error at {}. Leaving. Error was: {}".format(os.getpid(), get_now(), inst)
            return end_producer(queueIn)
Example No. 13
 def insert_data(self, query, args):
     self.logger.debug("Inserting logs to searchd")
     result = None
     for _ in 1, 2, 3:
         try:
             c = self.conn.cursor()
             result = c.execute(query, args)
             self.logger.debug("%s rows inserted" % c.rowcount)
             c.close()
         except ProgrammingError:
             self.logger.exception(
                 "Can't insert values to index: %s" % query)
         except DatabaseError as e:
             self.logger.exception("Sphinx connection error: %s" % e)
             try:
                 close_old_connections()
             except Exception as e:
                 self.logger.exception("Can't reconnect: %s" % e)
                 os.kill(os.getpid(), signal.SIGKILL)
         except Exception:
             self.logger.exception("Unhandled error in insert_data")
         else:
             return result
     self.logger.error("Can't insert data in 3 tries, exit process")
     os.kill(os.getpid(), signal.SIGKILL)
Example No. 14
    def serve(self):
        DEBUG(self.options.classpath)
        # redirect stdout/stderr to a ZMQ socket
        self.sysout = sys.stdout
        self.syserr = sys.stderr
        try:
            context = zmq.Context()
            socket = context.socket(zmq.PUB)
            DEBUG('binding output to ' + self.options.out_url)
            socket.bind(self.options.out_url)
            sys.stdout = OutStream(socket, 'stdout')
            sys.stderr = sys.stdout
        except Exception:
            print >> self.sysout, \
                  '<<<%s>>> ZMQServer -- setup on %s failed:' \
                  % (os.getpid(), self.options.out_url)
            traceback.print_exc(file=self.sysout)
            sys.exit(1)

        try:
            self.obj = self.ctor()
            DEBUG('obj=' + str(self.obj))
            ZmqCompWrapper.serve(self.obj,
                                 rep_url=self.options.rep_url,
                                 pub_url=self.options.pub_url)
        except Exception:
            print >> self.sysout, \
                  '<<<%s>>> ZMQServer -- wrapper failed:' % os.getpid()
            traceback.print_exc(file=self.sysout)
            sys.exit(1)
Example No. 15
def pick_triplets_impl(q_in, q_out):
  more = True
  while more:
      deq = q_in.get()
      if deq is None:
        more = False
      else:
        embeddings, emb_start_idx, nrof_images, alpha = deq
        print('running', emb_start_idx, nrof_images, os.getpid())
        for j in xrange(1,nrof_images):
            a_idx = emb_start_idx + j - 1
            neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
            for pair in xrange(j, nrof_images): # For every possible positive pair.
                p_idx = emb_start_idx + pair
                pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
                neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
                all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0]  # FaceNet selection
                #all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selecction
                nrof_random_negs = all_neg.shape[0]
                if nrof_random_negs>0:
                    rnd_idx = np.random.randint(nrof_random_negs)
                    n_idx = all_neg[rnd_idx]
                    #triplets.append( (a_idx, p_idx, n_idx) )
                    q_out.put( (a_idx, p_idx, n_idx) )
        #emb_start_idx += nrof_images
  print('exit',os.getpid())
Example No. 16
    def submit(self, command, parameters="", myDir=os.getcwd()):
        '''Submits a job
        '''
        jobFile = "/tmp/job-%d.%d" % (os.getpid(), self.cnt)
        w = open(jobFile, "w")
        w.write("#PBS -l mem=%dmb,vmem=%dmb\n" % (self.mem, self.mem))
        w.write("#PBS -q %s\n" % self.queue)
        if self.out is not None:
            w.write("#PBS -o %s\n" % self.out)
            self.out = None
        w.write("cd %s\n" % os.getcwd())
        w.write("%s %s\n" % (command, parameters))
        w.close()

        job = "qsub %s" % (jobFile,)
        statusFile = "/tmp/farm-%d.%d" % (os.getpid(), self.cnt)
        os.system(job + " >%s 2>/dev/null" % statusFile)
        f = open(statusFile)
        l = f.readline()
        job = int(l.split(".")[0])
        f.close()
        os.remove(statusFile)
        os.remove(jobFile)
        self.cnt += 1

        self.running.append(job)
Example No. 17
    def submit(self, command, parameters='', myDir=os.getcwd()):
        '''Submits a job
        '''
        jobFile = "/tmp/job-%d.%d" % (os.getpid(), self.cnt)
        w = open(jobFile, "w")
        if self.out is not None:
            out = '-o %s' % self.out
            self.out = None
        else:
            out = ''
        w.write('#!/bin/bash\n')
        w.write("%s %s\n" % (command, parameters))
        w.close()

        statusFile = "/tmp/farm-%d.%d" % (os.getpid(), self.cnt)
        job = "sbatch --mem=%d %s -p %s %s" % (self.mem, out,
                                               self.partition, jobFile)
        os.system(job + " > %s 2> /dev/null" % statusFile)
        f = open(statusFile)
        l = f.readline()
        job = int(l.rstrip().split(" ")[-1])
        f.close()
        os.remove(statusFile)
        os.remove(jobFile)
        self.cnt += 1

        self.running.append(job)
Example No. 18
def base():
    print "%s" % (request.__dict__,)
    print "\n\nFORM: %s" % (request.form,)
    logger.info("(%s) logging for hellow world!" % (os.getpid(),))
    time.sleep(int(request.args.get("ss",0)))
    logger.info("(%s) returning" % (os.getpid(),))
    return "(%s) hellow world!" % (os.getpid(),)
Example No. 19
    def submit(self, command, parameters="", myDir=os.getcwd()):
        '''Submits a job
        '''
        jobFile = "/tmp/job-%d.%d" % (os.getpid(), self.cnt)
        w = open(jobFile, "w")
        w.write("%s %s\n" % (command, parameters))
        w.close()

        if self.mailUser is not None:
            mail = "-m %s -M %s" % (self.mailOptions, self.mailUser)
        else:
            mail = ""
        while len(self.running) > self.maxProc:
            self.wait(beCareful=5)
        hosts = ""
        if len(self.hosts) > 0:
            hosts = " -q "
        for host in self.hosts:
            hosts += "\*@%s" % host
            if host != self.hosts[-1]:
                hosts += ","
        job = "qsub %s %s -S /bin/bash -V -P %s -cwd -l h_vmem=%dm %s " % (
            mail, hosts, self.project, self.mem, jobFile)
        statusFile = "/tmp/farm-%d.%d" % (os.getpid(), self.cnt)
        os.system(job + " >%s 2>/dev/null" % statusFile)
        f = open(statusFile)
        l = f.readline()
        job = int(l.split(" ")[2])
        f.close()
        os.remove(statusFile)
        os.remove(jobFile)
        self.cnt += 1

        self.running.append(job)
Example No. 20
    def __init__(self, is_rpc = True):
        super(MessageManager, self).__init__()
        self.setDaemon(True)

        if self.DEBUG:
            import logging
            snakemq.init_logging(open("snakemq-%s.log" % getpid(), "w"))
            logger = logging.getLogger("snakemq")
            logger.setLevel(logging.DEBUG)

        self.__pid = getpid()
        self.__rpc_name = "golismero-rpc-%d" % self.__pid
        self.__queue_name = "golismero-queue-%d" % self.__pid
        if is_rpc:
            self.__name = self.__rpc_name
        else:
            self.__name = self.__queue_name
        self.debug("__init__(%r)" % self.name)

        self.__link = snakemq.link.Link()
        self.__packeter = snakemq.packeter.Packeter(self.__link)
        self.__messaging = snakemq.messaging.Messaging(
            self.__name, "", self.__packeter)
        self.__messaging.on_message_recv.add(self.__callback)

        self.__address = None

        self.__queue = Queue()
        ##self.__queue.mutex = RLock()
        ##self.__queue.not_empty = Condition(self.__queue.mutex)
        ##self.__queue.not_full = Condition(self.__queue.mutex)
        ##self.__queue.all_tasks_done = Condition(self.__queue.mutex)

        self.debug("__init__(%r) => completed" % self.name)
Example No. 21
def connect(dbname,schema=None):
    print("DB.connect db=%s schema=%s pid=%s"%(dbname,schema,os.getpid()))
    try:
        if (dbname,schema) in connections:
            return connections[(dbname,schema)]
        if len(connections) >= MAX_CONS:
            print_color("need to close oldest connection (pid=%s, num_cons=%s)" %
                        (os.getpid(), len(connections)), "red")
            print("existing connections: %s" % ",".join(sorted([x for x in connections.keys()])))
            close_oldest_connection()
        url = config.get_db_url(dbname)
        res = urlparse(url)
        args = {
            "host": res.hostname,
            "database": res.path[1:],
        }
        if res.port:
            args["port"] = res.port
        if res.username:
            args["user"] = res.username
        if res.password:
            args["password"] = res.password
        db = Connection(**args)
        print("  => con_id=%s"%db.con_id)
        db._dbname = dbname
        db._schema = schema
        connections[(dbname,schema)] = db
        return db
    except Exception as e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        raise Exception("Failed to connect: %s" % e)
Example No. 22
        def playback_status(process, stop_event, write_lock, pausing_keep):
            """Poll mplayer process for time_pos song and ending.

            When song has ended, send a SIGUSR1 signal. When time_pos is larger
            than 30s, send a SIGUSR2 signal to report the song.

            When ``stop_event`` is set, exit thread.

            """
            reported = False
            time_pos_rex = re.compile(r'GLOBAL: ANS_TIME_POSITION=([0-9]+\.[0-9]+)')
            while not stop_event.is_set():
                if not reported:
                    with write_lock:
                        process.write('{} get_time_pos\n'.format(pausing_keep))
                stdout = process.read()
                if stdout:
                    if 'GLOBAL: EOF code: 1' in stdout:         #End of a playing track
                        os.kill(os.getpid(), signal.SIGUSR1)
                    if not reported:
                        match = time_pos_rex.search(stdout)
                        if match and float(match.group(1)) >= 30:
                            os.kill(os.getpid(), signal.SIGUSR2)
                            reported = True
                stop_event.wait(0.5)
Example No. 23
    def __init__(self, name='', host=''):
        cmd.Cmd.__init__(self)

        print '<<<'+str(os.getpid())+'>>> ConsoleServer ..............'
        
        #intercept stdout & stderr
        self.sysout = sys.stdout
        self.syserr = sys.stderr
        self.cout = StringIO()
        sys.stdout = self.cout
        sys.stderr = self.cout

        self.intro  = 'OpenMDAO '+__version__+' ('+__date__+')'
        self.prompt = 'OpenMDAO>> '
        
        self._hist    = []      ## No history yet
        self.known_types = []

        self.host = host
        self.pid = os.getpid()
        self.name = name or ('-cserver-%d' % self.pid)
        self.orig_dir = os.getcwd()
        self.root_dir = tempfile.mkdtemp(self.name)
        if os.path.exists(self.root_dir):
            logging.warning('%s: Removing existing directory %s',
                            self.name, self.root_dir)
            shutil.rmtree(self.root_dir)
        os.mkdir(self.root_dir)
        os.chdir(self.root_dir)
        
        print 'root_dir=',self.root_dir
        
        self.projfile = ''
        self.proj = None
        self.exc_info = None
Example No. 24
def acquire(kmotion_dir, mutex):
    """ 
    Acquire the 'mutex' mutex lock, very carefully
    
    args    : kmotion_dir ... the 'root' directory of kmotion 
              mutex ...       the actual mutex
    excepts : 
    return  : none
    """
    
    while True:
        # wait for any other locks to go
        while True:
            if check_lock(kmotion_dir, mutex) == 0:
                break
            time.sleep(0.01)
        
        # add our lock
        f_obj = open('%s/www/mutex/%s/%s' % (kmotion_dir, mutex, os.getpid()), 'w')
        f_obj.close()
            
        # wait ... see if another lock has appeared, if so remove our lock
        # and loop
        time.sleep(0.1)
        if check_lock(kmotion_dir, mutex) == 1:
            break
        os.remove('%s/www/mutex/%s/%s' % (kmotion_dir, mutex, os.getpid()))
        # random sleep to avoid Mexican stand-offs
        time.sleep(float(random.randint(1, 40)) / 1000)
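
The lock file is named after the owning pid, so the matching release is presumably just an unlink; a sketch under that assumption (not kmotion's actual code):

import os

def release(kmotion_dir, mutex):
    """Remove this process's lock file; tolerate a lock that is already gone."""
    try:
        os.remove('%s/www/mutex/%s/%s' % (kmotion_dir, mutex, os.getpid()))
    except OSError:
        pass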
Example No. 25
    def interpret(self):
        '''
        Interpret the messages being put into the Pipe, and do something with
        them. Messages are always sent in a 2-arg tuple (fname, arg)
        Right now we only expect one function and one argument but this could
        be generalized to **args.
        '''
        #info("brain")

        fname, arg = self.conn.recv() # Waits here to receive a new message
        self.logger.debug("{} received message {}".format(os.getpid(), (fname, arg)))

        func = self.func_dict.get(fname, False)
        if func:
            response = func(arg)
        else:
            self.logger.info("Given an unknown function {}, assuming kill signal.".format(fname))
            return False

        # Functions only return a response other than None when they want them
        # communicated back to the master process.
        # Some commands sent to the child processes do not require a response
        # to the main process.
        if response:
            self.logger.debug("{} sending back {}".format(os.getpid(), response))
            self.conn.send(response)
        return True
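
For context, a hedged sketch of the master side of this protocol: each message is a 2-tuple (fname, arg) sent over a multiprocessing Pipe (the function name 'status' is hypothetical):

from multiprocessing import Pipe

master_conn, child_conn = Pipe()    # child_conn becomes self.conn in the worker
master_conn.send(('status', None))  # interpret() recv()s this tuple in the child
# reply = master_conn.recv()        # and send()s back any non-None response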
Example No. 26
    def init(self, debug=False):
        """Initialize logging for MI.  Because this is a singleton it will only be initialized once."""
        path = os.environ[LOGGING_CONFIG_ENVIRONMENT_VARIABLE] if LOGGING_CONFIG_ENVIRONMENT_VARIABLE in os.environ else None
        haveenv = path and os.path.isfile(path)
        if path and not haveenv:
            print >> sys.stderr, 'WARNING: %s was set but %s was not found (using default configuration files instead)' % (LOGGING_CONFIG_ENVIRONMENT_VARIABLE, path)
        if path and haveenv:
            config.replace_configuration(path)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from ' + path
        elif os.path.isfile(LOGGING_PRIMARY_FROM_FILE):
            config.replace_configuration(LOGGING_PRIMARY_FROM_FILE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from ' + LOGGING_PRIMARY_FROM_FILE
        else:
            logconfig = pkg_resources.resource_string('mi', LOGGING_PRIMARY_FROM_EGG)
            parsed = yaml.load(logconfig)
            config.replace_configuration(parsed)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from config/' + LOGGING_PRIMARY_FROM_FILE

        if os.path.isfile(LOGGING_MI_OVERRIDE):
            config.add_configuration(LOGGING_MI_OVERRIDE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' supplemented logging from ' + LOGGING_MI_OVERRIDE
        elif os.path.isfile(LOGGING_CONTAINER_OVERRIDE):
            config.add_configuration(LOGGING_CONTAINER_OVERRIDE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' supplemented logging from ' + LOGGING_CONTAINER_OVERRIDE
Example No. 27
def check_command():
    """ (None) -> None
    Check the communication file for any commands given.
    Execute according to the commands.
    """
    # Load the relevant configs
    logging.debug("Checking for any new command on communication stream")
    conf = common.get_config()['communication']
    msg = comm_read()

    # Let the output print back to normal for printing status
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__

    if msg[0] == 'W':
        command = msg[1]
        if command == 'S':
            print('Stopping Explorer...')
            logging.warning("Stop command detected, Stopping.")
            comm_write('SS %s' % os.getpid())
            sys.exit(0)
        elif command == 'P':
            print('Pausing ...')
            logging.warning("Pause command detected, Pausing.")
            comm_write('PP %s' % os.getpid())
            while comm_read()[1] == 'P':
                logging.info('Waiting %i seconds ...' % conf['sleep_time'])
                print('Waiting %i seconds ...' % conf['sleep_time'])
                time.sleep(conf['sleep_time'])
                check_command()
        elif command == 'R':
            print('Resuming ...')
            logging.warning('Resuming.')
            comm_write('RR %s' % os.getpid())
Example No. 28
 def free(self, block):
     # free a block returned by malloc()
     # Since free() can be called asynchronously by the GC, it could happen
     # that it's called while self._lock is held: in that case,
     # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
     # trylock is used instead, and if the lock can't be acquired
     # immediately, the block is added to a list of blocks to be freed
     # synchronously sometimes later from malloc() or free(), by calling
     # _free_pending_blocks() (appending and retrieving from a list is not
     # strictly thread-safe but under CPython it's atomic thanks to the GIL).
     if os.getpid() != self._lastpid:
         raise ValueError(
             "My pid ({0:n}) is not last pid {1:n}".format(
                 os.getpid(),self._lastpid))
     if not self._lock.acquire(False):
         # can't acquire the lock right now, add the block to the list of
         # pending blocks to free
         self._pending_free_blocks.append(block)
     else:
         # we hold the lock
         try:
             self._n_frees += 1
             self._free_pending_blocks()
             self._add_free_block(block)
             self._remove_allocated_block(block)
         finally:
             self._lock.release()
Example No. 29
def createProcess():  # Only works on Unix/Linux/Mac:
    print('Process (%s) start...' % os.getpid())
    pid = os.fork()
    if pid == 0:
        print('I am child process (%s) and my parent is %s.' % (os.getpid(), os.getppid()))
    else:   # return the subprocess id
        print('I (%s) just created a child process (%s).' % (os.getpid(), pid))
Example No. 30
 def getInstance(cls, *args, **kwargs):
     pid = os.getpid()
     if pid not in cls._instances:
         from MaKaC.common.logger import Logger
         Logger.get('dbmgr').debug('cls._instance is None')
         cls._instances[pid] = DBMgr(*args, **kwargs)
     return cls._instances[pid]
Example No. 31
def p2p_port(n):
    return 11000 + n + os.getpid()%999
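
The pid offset keeps parallel test runs (different pids) on mostly disjoint ports, while staying deterministic within a single run; a quick check using the function above:

import os

for n in range(3):
    print(p2p_port(n))  # three distinct ports, stable for this process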
Example No. 32
import os
pid = os.fork()
if pid == 0:
    print('111%d' % os.getpid())
else:
    print('222%d' % os.getpid())
Example No. 33
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
    else:
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        logging.info("Running in test mode.")

    # PID management
    if pidfile and not test:
        if os.path.exists(pidfile):
            error("Already running (PID %s)." % open(pidfile).read())
        try:
            pid_fh = open(pidfile, 'w')
            pid_fh.write(str(os.getpid()))
            pid_fh.close()
        except IOError, why:
            error("Can't write PID file (%s)." % why)
    
    # run
    try:
        try:
            cm = Redirector(reactor, config, test)
            stdio.StandardIO(SquidHandlerProtocol(cm, test))
            cm.start()
        except IOError, why:
            error(why)
        except ConfigParser.Error, why:
            error("Configuration file: %s\n" % why)
    finally:
Example No. 34
    def __init__(
        self,
        save_dir=None,
        name='default',
        debug=False,
        version=None,
        autosave=False,
        description=None,
        create_git_tag=False,
        rank=0,
        *args, **kwargs
    ):
        """
        A new Experiment object defaults to 'default' unless a specific name is provided
        If a known name is already provided, then the file version is changed
        :param name:
        :param debug:
        """

        # change where the save dir is if requested

        if save_dir is not None:
            global _ROOT
            _ROOT = save_dir

        self.save_dir = save_dir
        self.tag_markdown_saved = False
        self.no_save_dir = save_dir is None
        self.metrics = []
        self.tags = {}
        self.name = name
        self.debug = debug
        self.version = version
        self.autosave = autosave
        self.description = description
        self.create_git_tag = create_git_tag
        self.exp_hash = '{}_v{}'.format(self.name, version)
        self.created_at = str(datetime.utcnow())
        self.rank = rank
        self.process = os.getpid()

        # when debugging don't do anything else
        if debug:
            return

        # update version hash if we need to increase version on our own
        # we will increase the previous version, so do it now so the hash
        # is accurate
        if version is None:
            old_version = self.__get_last_experiment_version()
            self.exp_hash = '{}_v{}'.format(self.name, old_version + 1)
            self.version = old_version + 1

        # create a new log file
        self.__init_cache_file_if_needed()

        # when we have a version, load it
        if self.version is not None:

            # when no version and no file, create it
            if not os.path.exists(self.__get_log_name()):
                self.__create_exp_file(self.version)
            else:
                # otherwise load it
                try:
                    self.__load()
                except Exception as e:
                    self.debug = True
        else:
            # if no version given, increase the version to a new exp
            # create the file if not exists
            old_version = self.__get_last_experiment_version()
            self.version = old_version
            self.__create_exp_file(self.version + 1)

        # create a git tag if requested
        if self.create_git_tag:
            desc = description if description is not None else 'no description'
            tag_msg = 'Test tube exp: {} - {}'.format(self.name, desc)
            cmd = 'git tag -a tt_{} -m "{}"'.format(self.exp_hash, tag_msg)
            os.system(cmd)
            print('Test tube created git tag:', 'tt_{}'.format(self.exp_hash))

        # set the tensorboardx log path to the /tf folder in the exp folder
        log_dir = self.get_tensorboardx_path(self.name, self.version)
        # this is a fix for pytorch 1.1 since it does not have this attribute
        for attr, val in [('purge_step', None),
                          ('max_queue', 10),
                          ('flush_secs', 120),
                          ('filename_suffix', '')]:
            if not hasattr(self, attr):
                setattr(self, attr, val)
        super().__init__(log_dir=log_dir, *args, **kwargs)
Example No. 35
def task(task_id):
    start_time = time.time()
    print 'task %s in process %s start on %s' % (task_id, os.getpid(), start_time)
    time.sleep(3)
    end_time = time.time()
    print 'task %s in process %s end on %s' % (task_id, os.getpid(), end_time)
Example No. 36
 def instance(cls):
     tid = threading.get_ident()
     pid = os.getpid()
     if (pid, tid) not in cls.__singleton_instances:
         cls.__singleton_instances[(pid, tid)] = cls()
     return cls.__singleton_instances[(pid, tid)]
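
A self-contained sketch of the same per-(pid, thread-id) caching pattern, runnable on its own (the class name is made up):

import os
import threading

class PerThreadSingleton(object):
    _instances = {}

    @classmethod
    def instance(cls):
        key = (os.getpid(), threading.get_ident())
        if key not in cls._instances:
            cls._instances[key] = cls()
        return cls._instances[key]

# same process and same thread -> same cached object
assert PerThreadSingleton.instance() is PerThreadSingleton.instance()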
Example No. 37
def rpc_port(n):
    return 12000 + n + os.getpid()%999
Example No. 38
 def __init__(self, name, args={}):
     self.name = name
     self.args = args
     self.pid = os.getpid()
Example No. 39
 def _info_file_default(self):
     info_file = "nbserver-%s.json"%os.getpid()
     return os.path.join(self.profile_dir.security_dir, info_file)
Example No. 40
	parser.add_option('-r', '--realm', action='store', dest='realm',
			default='realm', help='Realm for HTTP Proxy authorization')
	parser.add_option('-t', '--translate', action='store', dest='translate',
			metavar='HOSTNAME',
			help='Translate requests for this host name to localhost')
	parser.add_option('-f', '--fork', action='store_true',
			dest='fork', default=False, help='Fork daemon process')
	parser.add_option('-v', '--verbose', action='store_true',
			dest='verbose', default=False, help='Output verbose informations')
	(options, arguments) = parser.parse_args()

	httpd = BaseHTTPServer.HTTPServer(('', int(options.port)), Proxy)
	if options.fork:
		pid = os.fork()
		if pid == 0:
			for fd in range(3):
				os.close(fd)
				fd2 = os.open(os.devnull, fd == 0 and os.O_RDONLY or os.O_WRONLY)
				if fd2 != fd:
					os.dup2(fd2, fd)
					os.close(fd2)
			httpd.serve_forever()
		else:
			print("proxy_pid=%d proxy_port=%d" % (pid, httpd.server_port))
	else:
		try:
			print("proxy_pid=%d proxy_port=%d" % (os.getpid(), httpd.server_port))
			httpd.serve_forever()
		except KeyboardInterrupt:
			pass
Example No. 41
    def run(self, func, *args, **kwargs):

        r"""
        Run the indicated function with the given args and kwargs and return the value that the function
        returns.  If the time_out value expires, raise a ValueError exception with a detailed error message.

        This method passes all of the args and kwargs directly to the child function with the following
        important exception: If kwargs contains a 'time_out' value, it will be used to set the func timer
        object's time_out value and then the kwargs['time_out'] entry will be removed.  If the time-out
        expires before the function finishes running, this method will raise a ValueError.

        Example:
        func_timer = func_timer_class()
        func_timer.run(run_key, "sleep 3", time_out=2)

        Example:
        try:
            result = func_timer.run(func1, "parm1", time_out=2)
            print_var(result)
        except ValueError:
            print("The func timed out but we're handling it.")

        Description of argument(s):
        func                        The function object which is to be called.
        args                        The arguments which are to be passed to the function object.
        kwargs                      The keyword arguments which are to be passed to the function object.  As
                                    noted above, kwargs['time_out'] will get special treatment.
        """

        gp.lprint_executing()

        # Store method parms as object parms.
        self.__func = func

        # Get self.__time_out value from kwargs.  If kwargs['time_out'] is not present, self.__time_out will
        # default to None.
        self.__time_out = None
        if 'time_out' in kwargs:
            self.__time_out = kwargs['time_out']
            del kwargs['time_out']
            # Convert "none" string to None.
            try:
                if self.__time_out.lower() == "none":
                    self.__time_out = None
            except AttributeError:
                pass
            if self.__time_out is not None:
                self.__time_out = int(self.__time_out)
                # Ensure that time_out is non-negative.
                message = gv.valid_range(self.__time_out, 0,
                                         var_name="time_out")
                if message != "":
                    raise ValueError("\n"
                                     + gp.sprint_error_report(message,
                                                              format='long'))

        gp.lprint_varx("time_out", self.__time_out)
        self.__child_pid = 0
        if self.__time_out is not None:
            # Save the original SIGUSR1 handler for later restoration by this class' methods.
            self.__original_SIGUSR1_handler = signal.getsignal(signal.SIGUSR1)
            # Designate a SIGUSR1 handling function.
            signal.signal(signal.SIGUSR1, self.timed_out)
            parent_pid = os.getpid()
            self.__child_pid = os.fork()
            if self.__child_pid == 0:
                gp.dprint_timen("Child timer pid " + str(os.getpid())
                                + ": Sleeping for " + str(self.__time_out)
                                + " seconds.")
                time.sleep(self.__time_out)
                gp.dprint_timen("Child timer pid " + str(os.getpid())
                                + ": Sending SIGUSR1 to parent pid "
                                + str(parent_pid) + ".")
                os.kill(parent_pid, signal.SIGUSR1)
                os._exit(0)

        # Call the user's function with the user's arguments.
        children = gm.get_child_pids()
        gp.lprint_var(children)
        gp.lprint_timen("Calling the user's function.")
        gp.lprint_varx("func_name", func.__name__)
        gp.lprint_vars(args, kwargs)
        try:
            result = func(*args, **kwargs)
        except Exception as func_exception:
            # We must handle all exceptions so that we have the chance to cleanup before re-raising the
            # exception.
            gp.lprint_timen("Encountered exception in user's function.")
            self.cleanup()
            raise func_exception
        gp.lprint_timen("Returned from the user's function.")

        self.cleanup()

        return result
Example No. 42
class Tracing(object):
    _trace_events = [
        {
            'name': 'process_name',
            'ph': _TraceEventPhases.METADATA,
            'pid': os.getpid(),
            'args': {'name': 'buck.py'}
        }
    ]

    def __init__(self, name, args={}):
        self.name = name
        self.args = args
        self.pid = os.getpid()

    def __enter__(self):
        now_us = monotonic_time_nanos() / 1000
        self._add_trace_event(
            'buck-launcher',
            self.name,
            _TraceEventPhases.BEGIN,
            self.pid,
            1,
            now_us,
            self.args)

    def __exit__(self, x_type, x_value, x_traceback):
        now_us = monotonic_time_nanos() / 1000
        self._add_trace_event(
            'buck-launcher',
            self.name,
            _TraceEventPhases.END,
            self.pid,
            1,
            now_us,
            self.args)

    @staticmethod
    def _add_trace_event(
            category,
            name,
            phase,
            pid,
            tid,
            ts,
            args):
        Tracing._trace_events.append({
            'cat': category,
            'name': name,
            'ph': phase,
            'pid': pid,
            'tid': tid,
            'ts': ts,
            'args': args})

    @staticmethod
    def write_to_dir(buck_log_dir, build_id):
        filename_time = time.strftime('%Y-%m-%d.%H-%M-%S')
        trace_filename = os.path.join(
            buck_log_dir, 'launch.{0}.{1}.trace'.format(filename_time, build_id))
        trace_filename_link = os.path.join(buck_log_dir, 'launch.trace')
        try:
            os.makedirs(buck_log_dir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        with open(trace_filename, 'w') as f:
            json.dump(Tracing._trace_events, f)

        create_symlink(trace_filename, trace_filename_link)
        Tracing.clean_up_old_logs(buck_log_dir)

    @staticmethod
    def clean_up_old_logs(buck_log_dir, logs_to_keep=25):
        traces = filter(
            os.path.isfile,
            glob.glob(os.path.join(buck_log_dir, 'launch.*.trace')),
        )
        try:
            traces = sorted(traces, key=os.path.getmtime)
            for f in traces[:-logs_to_keep]:
                os.remove(f)
        except OSError:
            return  # a concurrent run cleaned up the logs
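
A hedged usage sketch of the Tracing class above (the log directory and build id are made up; it relies on the module helpers like monotonic_time_nanos and create_symlink referenced in the class):

with Tracing('compute_deps', args={'target': 'app'}):
    pass  # timed work goes here; BEGIN/END events are recorded around it

Tracing.write_to_dir('/tmp/buck-log', '0123-abcd')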
Example No. 43
    just_draw = False
else:
    just_draw = True
if 'false' in save_hdf:
    save_hdf = False
else:
    save_hdf = True

ins_conf_file = sat.upper() + '_' + ins.upper() + '_CONF'
ins_conf = __import__(ins_conf_file)

if sat not in conf.support_sat_ins or ins not in conf.support_sat_ins[sat]:
    print 'sat or ins setting is NOT found in conf.py'
    sys.exit(0)

pid = os.getpid()
fname = os.path.splitext(os.path.basename(os.path.realpath(__file__)))[0]
log_tag = fname + '.' + sat + '.' + ins + '.' + str(pid)
my_name = common.get_local_hostname()
my_tag = my_name + '.' + log_tag
my_pidfile = conf.pid_path + '/' + my_name + '.' + fname + '.' + sat + '.' \
            + ins + '.pid'
my_alivefile = conf.pid_path + '/' + my_name + '.' + fname + '.' + sat + '.' \
            + ins + '.alive'
my_log = conf.log_path + '/' + my_name + '.'  # just prefix: /log/path/prefix.

#get the correct time span.
if calc_date == 'now':
    #calc_date = common.utc_YmdH()
    calc_date = datetime.now() + timedelta(days=-3)
    calc_date = calc_date.strftime('%Y-%m-%d-%H')
Example No. 44
 def make_tmpdir():
     return os.path.join(gettempdir(), str(os.getpid()))
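
make_tmpdir only builds a pid-specific path; the caller still has to create the directory, e.g.:

import os

path = make_tmpdir()              # e.g. /tmp/12345 on POSIX
os.makedirs(path, exist_ok=True)  # safe if this process already created it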
Example No. 45
def kill_app(signal_int, call_back):
    '''Kill main thread
    '''
    os.kill(os.getpid(), signal.SIGTERM)
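
The (signal_int, call_back) signature matches Python's signal-handler convention (the second argument is really the current stack frame), so registration presumably looks like:

import signal

signal.signal(signal.SIGINT, kill_app)  # Ctrl-C now sends SIGTERM to this process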
Example No. 46
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""

        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option(
            "--nocleanup",
            dest="nocleanup",
            default=False,
            action="store_true",
            help="Leave unit-e daemons and test.* datadir on exit or error")
        parser.add_option(
            "--noshutdown",
            dest="noshutdown",
            default=False,
            action="store_true",
            help="Don't stop unit-e daemons after the test execution")
        parser.add_option(
            "--srcdir",
            dest="srcdir",
            default=os.path.normpath(
                os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
            help=
            "Source directory containing unit-e/unit-e-cli (default: %default)"
        )
        parser.add_option("--cachedir",
                          dest="cachedir",
                          default=os.path.normpath(
                              os.path.dirname(os.path.realpath(__file__)) +
                              "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir",
                          dest="tmpdir",
                          help="Root directory for datadirs")
        parser.add_option(
            "-l",
            "--loglevel",
            dest="loglevel",
            default="INFO",
            help=
            "log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory."
        )
        parser.add_option("--tracerpc",
                          dest="trace_rpc",
                          default=False,
                          action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option(
            "--portseed",
            dest="port_seed",
            default=os.getpid(),
            type='int',
            help=
            "The seed to use for assigning port numbers (default: current process id)"
        )
        parser.add_option("--coveragedir",
                          dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile",
                          dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure",
                          dest="pdbonfailure",
                          default=False,
                          action="store_true",
                          help="Attach a python debugger if test fails")
        parser.add_option(
            "--usecli",
            dest="usecli",
            default=False,
            action="store_true",
            help="use unit-e-cli instead of RPC for all commands")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        PortSeed.n = self.options.port_seed

        os.environ['PATH'] = self.options.srcdir + ":" + os.environ['PATH']

        check_json_precision()

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()

        success = TestStatus.FAILED

        try:
            if self.options.usecli and not self.supports_cli:
                raise SkipTest(
                    "--usecli specified but test does not support using CLI")
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print(
                "Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info(
                "Note: unit-e daemons were not stopped and may still be running"
            )

        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error(
                "Test failed. Test logging available at %s/test_framework.log",
                self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(
                os.path.normpath(
                    os.path.dirname(os.path.realpath(__file__)) +
                    "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        sys.exit(exit_code)
Example No. 47
    def parse_args(self):
        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument(
            "--nocleanup",
            dest="nocleanup",
            default=False,
            action="store_true",
            help="Leave bitcoinds and test.* datadir on exit or error")
        parser.add_argument(
            "--noshutdown",
            dest="noshutdown",
            default=False,
            action="store_true",
            help="Don't stop bitcoinds after the test execution")
        parser.add_argument(
            "--cachedir",
            dest="cachedir",
            default=os.path.abspath(
                os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
            help=
            "Directory for caching pregenerated datadirs (default: %(default)s)"
        )
        parser.add_argument("--tmpdir",
                            dest="tmpdir",
                            help="Root directory for datadirs")
        parser.add_argument(
            "-l",
            "--loglevel",
            dest="loglevel",
            default="INFO",
            help=
            "log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory."
        )
        parser.add_argument("--tracerpc",
                            dest="trace_rpc",
                            default=False,
                            action="store_true",
                            help="Print out all RPC calls as they are made")
        parser.add_argument(
            "--portseed",
            dest="port_seed",
            default=os.getpid(),
            type=int,
            help=
            "The seed to use for assigning port numbers (default: current process id)"
        )
        parser.add_argument(
            "--coveragedir",
            dest="coveragedir",
            help="Write tested RPC commands into this directory")
        parser.add_argument(
            "--configfile",
            dest="configfile",
            default=os.path.abspath(
                os.path.dirname(os.path.realpath(__file__)) +
                "/../../config.ini"),
            help=
            "Location of the test framework config file (default: %(default)s)"
        )
        parser.add_argument("--pdbonfailure",
                            dest="pdbonfailure",
                            default=False,
                            action="store_true",
                            help="Attach a python debugger if test fails")
        parser.add_argument(
            "--usecli",
            dest="usecli",
            default=False,
            action="store_true",
            help="use bitcoin-cli instead of RPC for all commands")
        parser.add_argument(
            "--perf",
            dest="perf",
            default=False,
            action="store_true",
            help="profile running nodes with perf for the duration of the test"
        )
        parser.add_argument(
            "--valgrind",
            dest="valgrind",
            default=False,
            action="store_true",
            help=
            "run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required"
        )
        parser.add_argument(
            "--randomseed",
            type=int,
            help=
            "set a random seed for deterministically reproducing a previous test run"
        )
        parser.add_argument(
            "--with-axionactivation",
            dest="axionactivation",
            default=False,
            action="store_true",
            help="Activate axion update on timestamp {}".format(
                TIMESTAMP_IN_THE_PAST))

        self.add_options(parser)
        self.options = parser.parse_args()
Example No. 48
def sndrcv(pks, pkt, timeout = None, inter = 0, verbose=None, chainCC=0, retry=0, multi=0):
    if not isinstance(pkt, Gen):
        pkt = SetGen(pkt)
        
    if verbose is None:
        verbose = conf.verb
    debug.recv = plist.PacketList([],"Unanswered")
    debug.sent = plist.PacketList([],"Sent")
    debug.match = plist.SndRcvList([])
    nbrecv=0
    ans = []
    # do it here to fix random fields, so that parent and child have the same values
    all_stimuli = tobesent = [p for p in pkt]
    notans = len(tobesent)

    hsent={}
    for i in tobesent:
        h = i.hashret()
        if h in hsent:
            hsent[h].append(i)
        else:
            hsent[h] = [i]
    if retry < 0:
        retry = -retry
        autostop=retry
    else:
        autostop=0


    while retry >= 0:
        found=0
    
        if timeout < 0:
            timeout = None
            
        rdpipe,wrpipe = os.pipe()
        rdpipe=os.fdopen(rdpipe)
        wrpipe=os.fdopen(wrpipe,"w")

        pid=1
        try:
            pid = os.fork()
            if pid == 0:
                try:
                    sys.stdin.close()
                    rdpipe.close()
                    try:
                        i = 0
                        if verbose:
                            print "Begin emission:"
                        for p in tobesent:
                            pks.send(p)
                            i += 1
                            time.sleep(inter)
                        if verbose:
                            print "Finished to send %i packets." % i
                    except SystemExit:
                        pass
                    except KeyboardInterrupt:
                        pass
                    except:
                        log_runtime.exception("--- Error in child %i" % os.getpid())
                        log_runtime.info("--- Error in child %i" % os.getpid())
                finally:
                    try:
                        os.setpgrp() # Change process group to avoid Ctrl-C reaching the child
                        sent_times = [p.sent_time for p in all_stimuli if p.sent_time]
                        cPickle.dump( (conf.netcache,sent_times), wrpipe )
                        wrpipe.close()
                    except:
                        pass
            elif pid < 0:
                log_runtime.error("fork error")
            else:
                wrpipe.close()
                stoptime = 0
                remaintime = None
                inmask = [rdpipe,pks]
                try:
                    try:
                        while 1:
                            if stoptime:
                                remaintime = stoptime-time.time()
                                if remaintime <= 0:
                                    break
                            r = None
                            if conf.use_bpf:
                                from scapy.arch.bpf.supersocket import bpf_select
                                inp = bpf_select(inmask)
                                if pks in inp:
                                    r = pks.recv()
                            elif not isinstance(pks, StreamSocket) and (FREEBSD or DARWIN or OPENBSD):
                                inp, out, err = select(inmask,[],[], 0.05)
                                if len(inp) == 0 or pks in inp:
                                    r = pks.nonblock_recv()
                            else:
                                inp = []
                                try:
                                    inp, out, err = select(inmask,[],[], remaintime)
                                except IOError, exc:
                                    if exc.errno != errno.EINTR:
                                        raise
                                if len(inp) == 0:
                                    break
                                if pks in inp:
                                    r = pks.recv(MTU)
                            if rdpipe in inp:
                                if timeout:
                                    stoptime = time.time()+timeout
                                del(inmask[inmask.index(rdpipe)])
                            if r is None:
                                continue
                            ok = 0
                            h = r.hashret()
                            if h in hsent:
                                hlst = hsent[h]
                                for i, sentpkt in enumerate(hlst):
                                    if r.answers(sentpkt):
                                        ans.append((sentpkt, r))
                                        if verbose > 1:
                                            os.write(1, "*")
                                        ok = 1
                                        if not multi:
                                            del hlst[i]
                                            notans -= 1
                                        else:
                                            if not hasattr(sentpkt, '_answered'):
                                                notans -= 1
                                            sentpkt._answered = 1
                                        break
                            if notans == 0 and not multi:
                                break
                            if not ok:
                                if verbose > 1:
                                    os.write(1, ".")
                                nbrecv += 1
                                if conf.debug_match:
                                    debug.recv.append(r)
                    except KeyboardInterrupt:
                        if chainCC:
                            raise
                finally:
                    try:
                        nc,sent_times = cPickle.load(rdpipe)
                    except EOFError:
                        warning("Child died unexpectedly. Packets may have not been sent %i"%os.getpid())
                    else:
                        conf.netcache.update(nc)
                        for p,t in zip(all_stimuli, sent_times):
                            p.sent_time = t
                    os.waitpid(pid,0)
        finally:
            if pid == 0:
                os._exit(0)

        remain = list(itertools.chain(*hsent.itervalues()))
        if multi:
            remain = [p for p in remain if not hasattr(p, '_answered')]

        if autostop and len(remain) > 0 and len(remain) != len(tobesent):
            retry = autostop
            
        tobesent = remain
        if len(tobesent) == 0:
            break
        retry -= 1
        
    if conf.debug_match:
        debug.sent=plist.PacketList(remain[:],"Sent")
        debug.match=plist.SndRcvList(ans[:])

    #clean the ans list to delete the field _answered
    if (multi):
        for s,r in ans:
            if hasattr(s, '_answered'):
                del(s._answered)
    
    if verbose:
        print "\nReceived %i packets, got %i answers, remaining %i packets" % (nbrecv+len(ans), len(ans), notans)
    return plist.SndRcvList(ans),plist.PacketList(remain,"Unanswered")
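
A hedged usage sketch (not part of the original example): this mirrors roughly how scapy's sr() helper drives sndrcv(); the destination address is a documentation placeholder.

from scapy.all import conf, IP, ICMP

s = conf.L3socket()
ans, unans = sndrcv(s, IP(dst="192.0.2.1")/ICMP(), timeout=2)
ans.summary()     # matched (sent, received) pairs
unans.summary()   # stimuli that never got an answer
s.close()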
Exemplo n.º 49
0
def run_proc(name):
    print("Run child process %s (%s)..."%(name,os.getpid()))
Exemplo n.º 50
0
def connect(dbapi_connection, connection_record):
    connection_record.info['pid'] = os.getpid()
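
This connect listener is one half of SQLAlchemy's documented recipe for sharing an engine across os.fork(). A sketch of the companion checkout listener, adapted from the SQLAlchemy pooling docs (the engine object is assumed to exist), shows how the stored pid is used:

import os
from sqlalchemy import event, exc

@event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
    # reject connections that were created in a different (parent) process
    if connection_record.info['pid'] != os.getpid():
        connection_record.connection = connection_proxy.connection = None
        raise exc.DisconnectionError(
            "Connection record belongs to pid %s, attempting to check out in pid %s"
            % (connection_record.info['pid'], os.getpid()))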
Exemplo n.º 51
0
import random
import time
from math import *
from numpy import *
# numpy.numarray was removed in numpy 1.9; numpy.linalg replaces it
import numpy.linalg as la
import numpy.linalg as la_new
from os import listdir
from os import getcwd
import os

# From the Python Cookbook: memory monitor

_proc_status = '/proc/%d/status' % os.getpid()

_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
          'KB': 1024.0, 'MB': 1024.0*1024.0}

def _VmB(VmKey):
    '''Private.
    '''
    global _proc_status, _scale
    # read the pseudo file /proc/<pid>/status
    try:
        t = open(_proc_status)
        v = t.read()
        t.close()
    except:
        return 0.0  # non-Linux?
    # get the VmKey line, e.g. 'VmRSS:  9999  kB\n ...'
    i = v.index(VmKey)
    v = v[i:].split(None, 3)  # splits into: key, value, unit, rest
    if len(v) < 3:
        return 0.0  # invalid format?
    # convert the Vm value to bytes
    return float(v[1]) * _scale[v[2]]
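
The recipe's companion helpers, reconstructed here from the same Python Cookbook entry (they were cut off in the snippet above), wrap _VmB with the /proc status keys:

def memory(since=0.0):
    '''Return virtual memory usage in bytes.'''
    return _VmB('VmSize:') - since

def resident(since=0.0):
    '''Return resident memory usage in bytes.'''
    return _VmB('VmRSS:') - since

def stacksize(since=0.0):
    '''Return stack size in bytes.'''
    return _VmB('VmStk:') - since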
Exemplo n.º 52
0
	def initOverlayClass(self):
		# #4264: the caret_newLine script can only be bound for processes other than NVDA's own process.
		# Pressing enter on an edit field can cause modal dialogs to appear, yet gesture.send and api.processPendingEvents may call wx.Yield, which ends in a freeze.
		if self.announceNewLineText and self.processID!=os.getpid():
			self.bindGesture("kb:enter","caret_newLine")
			self.bindGesture("kb:numpadEnter","caret_newLine")
Exemplo n.º 53
0
Modules:
_thread:    low level, a thin wrapper around the C module
threading:  high level, a wrapper around _thread

Every process starts one thread by default; that thread is called the main thread
'''

import threading,os,time

def get():
    print('haha---{}--{}'.format(threading.current_thread(),threading.current_thread().name))



if __name__ == '__main__':
    print('Main process started, {}'.format(os.getpid()))
    print('Main thread--{}'.format(threading.current_thread()))

    # create child threads
    p = threading.Thread(target=get, name='th1')
    q = threading.Thread(target=get, name='th2')

    p.start()
    q.start()

    # best practice: wait for both threads to finish
    p.join()
    q.join()

    print('Main thread finished')
Exemplo n.º 54
0
# @author Sam
# @date 2018-01-16
# desc Revisiting Python threads and processes (part 1)
# Creating a single process on Windows

from multiprocessing import Process
import os

def run_proc(name):
    print("Run child process %s (%s)..."%(name,os.getpid()))

if __name__ == '__main__':
    print('Parent process %s.' % os.getpid())
    p = Process(target=run_proc, args=('test',))
    print("Child process wil start.")
    p.start()
    # 等待子进程结束后继续往下执行
    p.join()
    print('Child process end.')
Exemplo n.º 55
0
def start_loop():
    global _loc
    global tasks
    global t_time
    global stop
    global node_id

    print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')

    node_id = mec_id(ip_address())
    # print('node id: ', node_id)
    _threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
    for i in _threads_:
        # build each worker thread once: the original created three separate
        # Thread objects per target, so the daemon flag was set on a thread
        # that was never started
        t = Thread(target=i, daemon=True)
        thread_record.append(t)
        t.start()

    x = gp.getpass('Press any key to Start...').lower()
    if x != 'exit':
        print('========= Waiting for tasks ==========')
        _time_ = dt.datetime.now()
        while True:
            try:
                if len(received_task_queue) > 0:
                    info = received_task_queue.pop(0)
                    tasks, t_time = info

                    print('EDF List of Processes: ', tasks, '\n')

                    print('\n========= Running Deadlock Algorithm ===========')
                    a = load_tasks()
                    list_seq = get_exec_seq(scheduler(a))
                    if len(list_seq) > 0:  # do only when there is a task in safe sequence
                        wait_list = calc_wait_time(list_seq)
                        print('\nWaiting Time List: ', wait_list)
                        compare_result = compare_local_mec(wait_list)
                        print('\nExecute Locally: ', compare_result[1])
                        _loc += len(compare_result[1])  # total number of tasks to be executed locally
                        print('\nExecute in MEC: ', compare_result[0])

                        if len(compare_result[0]) > 0:
                            print('\nSending to cooperative platform')
                            cooperative_mec(compare_result[0])
                        execute(compare_result[1])
                        show_graphs()
                    _time_ = dt.datetime.now()
                else:
                    send_message(str('wt {} 0.0'.format(ip_address())))
                    time.sleep(.5)
                    now = dt.datetime.now()
                    delta = now - _time_
                    if delta > dt.timedelta(minutes=3):
                        print('terminating programme: 3 mins elapsed without tasks')
                        _id_ = get_hostname()[-1]
                        result = f"wt{_id_}_2_{mec_no} = {mec_waiting_time} " \
                                 f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} " \
                                 f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} " \
                                 f"\noff_cloud{_id_}_2_{mec_no} = {_off_cloud} " \
                                 f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}" \
                                 f"\nloc{_id_}_2_{mec_no} = {_loc} " \
                                 f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}"
                        list_result = [
                            f"wt{_id_}_2_{mec_no} = {mec_waiting_time} ",
                            f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} ",
                            f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} \noff_cloud{_id_}_2_{mec_no} = {_off_cloud} ",
                            f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}",
                            f"\nloc{_id_}_2_{mec_no} = {_loc} ",
                            f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}"
                        ]
                        for i in list_result:
                            cmd = 'echo "{}" >> data.py'.format(i)
                            os.system(cmd)
                            os.system('echo "{}" >> /home/mec/result/data.py'.format(i))

                        send_email(result)
                        stop += 1
                        '''
                        for i in thread_record:
                            i.join()
                        '''
                        _client.loop_stop()
                        time.sleep(1)
                        print('done')
                        os.system('kill -9 {}'.format(os.getpid()))
                        break
            except KeyboardInterrupt:
                print('\nProgramme Terminated')
                _id_ = get_hostname()[-1]
                result = f"wt{_id_}_2_{mec_no} = {mec_waiting_time} " \
                         f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} " \
                         f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} \noff_cloud{_id_}_2_{mec_no} = {_off_cloud} " \
                         f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}" \
                         f"\nloc{_id_}_2_{mec_no} = {_loc} " \
                         f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}"
                list_result = [
                    f"wt{_id_}_2_{mec_no} = {mec_waiting_time} ",
                    f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} ",
                    f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} \noff_cloud{_id_}_2_{mec_no} = {_off_cloud} ",
                    f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}",
                    f"\nloc{_id_}_2_{mec_no} = {_loc} ",
                    f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}"
                ]
                for i in list_result:
                    cmd = 'echo "{}" >> data.py'.format(i)
                    os.system(cmd)
                    os.system('echo "{}" >> /home/mec/result/data.py'.format(i))

                send_email(result)
                stop += 1
                '''
                for i in thread_record:
                    i.join()
                '''
                _client.loop_stop()
                time.sleep(1)
                print('done')
                os.system('kill -9 {}'.format(os.getpid()))
                break
Exemplo n.º 56
0
def ensure_type(value, value_type):
    ''' return a configuration variable with casting
    :arg value: The value to ensure correct typing of
    :kwarg value_type: The type of the value.  This can be any of the following strings:
        :boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueError
        :float: Sets the value to a float or raises a ValueError
        :list: Treats the value as a comma separated list.  Split the value
            and return it as a python list.
        :none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
        :tmppath: Create a unique temporary directory inside of the directory
            specified by value and return its path.
        :pathspec: Treat the value as an os.pathsep-separated list of paths and
            expand each part for environment variables and tildes.
        :pathlist: Treat the value as a typical PATH string.  (On POSIX, this
            means colon separated strings.)  Split the value and then expand
            each part for environment variables and tildes.
    '''
    if value_type:
        value_type = value_type.lower()

    if value_type in ('boolean', 'bool'):
        value = boolean(value, strict=False)

    elif value:
        if value_type in ('integer', 'int'):
            value = int(value)

        elif value_type == 'float':
            value = float(value)

        elif value_type == 'list':
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]

        elif value_type == 'none':
            if value == "None":
                value = None

        elif value_type == 'path':
            value = resolve_path(value)

        elif value_type in ('tmp', 'temppath', 'tmppath'):
            value = resolve_path(value)
            if not os.path.exists(value):
                makedirs_safe(value, 0o700)
            prefix = 'ansible-local-%s' % os.getpid()
            value = tempfile.mkdtemp(prefix=prefix, dir=value)

        elif value_type == 'pathspec':
            if isinstance(value, string_types):
                value = value.split(os.pathsep)
            value = [resolve_path(x) for x in value]

        elif value_type == 'pathlist':
            if isinstance(value, string_types):
                value = value.split(',')
            value = [resolve_path(x) for x in value]

        # defaults to string types
        elif isinstance(value, string_types):
            value = unquote(value)

    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
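
A brief usage sketch; the import path assumes this is the ensure_type that lives in ansible.config.manager, where the helper is normally defined:

from ansible.config.manager import ensure_type

ensure_type("yes", "bool")          # -> True
ensure_type("42", "int")            # -> 42
ensure_type("a, b, c", "list")      # -> ['a', 'b', 'c']
ensure_type("$HOME/work", "path")   # -> e.g. '/home/user/work'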
Exemplo n.º 57
0
def download_flickr_video(url_queue,
                          cv_main_to_worker,
                          cv_worker_to_main,
                          data_dir,
                          ffmpeg_path,
                          ffprobe_path,
                          is_retry=False,
                          audio_output_path=None,
                          video_output_path=None,
                          **ffmpeg_cfg):
    """
    Downloads a Flickr video, retrying with an alternate URL if the given
    URL is invalid

    Args:
        url_queue:     Queue of Flickr video URLs to download
                       (Type: multiprocessing.Queue)

        data_dir:      Output directory where video will be saved if output
                       path is not explicitly given
                       (Type: str)

        ffmpeg_path:   Path to ffmpeg executable
                       (Type: str)

        ffprobe_path:  Path to ffprobe executable
                       (Type: str)

    Keyword Args:
        is_retry:         If True, this invocation of the function is for retrying
                          a video download with an alternate URL. Should not be set
                          by user.
                          (Type: bool)

        **ffmpeg_cfg:  Download arguments used by ffmpeg

    """
    global stop
    signal.signal(signal.SIGINT, stop_handler)
    url = None
    while True:
        while True:
            try:
                if stop:
                    LOGGER.info('Worker process {} stopped'.format(
                        os.getpid()))
                    return
                url = url_queue.get(False)
                break
            except queue.Empty:
                with cv_main_to_worker:
                    cv_main_to_worker.wait(3.0)
        with cv_worker_to_main:
            cv_worker_to_main.notify()
        if url == "#END#":
            return
        LOGGER.info('Attempting to download video from "{}"'.format(url))
        try:
            download_video(url,
                           ffmpeg_path,
                           ffprobe_path,
                           data_dir=data_dir,
                           video_output_path=video_output_path,
                           audio_output_path=audio_output_path,
                           **ffmpeg_cfg)
        except Exception as e:
            LOGGER.error(e)
            flickr_id = extract_flickr_id(url)
            err_msg = 'Could not download video with Flickr ID {}'.format(
                flickr_id)
            LOGGER.error(err_msg)
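
A hedged launcher sketch for the worker above; everything except download_flickr_video and the "#END#" sentinel (both visible in the worker loop) is illustrative, not taken from the original project:

import multiprocessing as mp

def launch_workers(urls, n_workers, data_dir, ffmpeg_path, ffprobe_path):
    url_queue = mp.Queue()
    cv_main_to_worker = mp.Condition()
    cv_worker_to_main = mp.Condition()
    workers = [mp.Process(target=download_flickr_video,
                          args=(url_queue, cv_main_to_worker, cv_worker_to_main,
                                data_dir, ffmpeg_path, ffprobe_path))
               for _ in range(n_workers)]
    for w in workers:
        w.start()
    for url in urls:
        url_queue.put(url)
    for _ in workers:
        url_queue.put("#END#")  # sentinel the worker loop exits on
    for w in workers:
        w.join()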
Exemplo n.º 58
0
def __getitem__(self, index):
    pid = os.getpid() % self.WORKER_SIZE
    batch = opcaffe.Batch()
    self.workers[pid].load(batch)
    return torch.tensor(batch.data), torch.tensor(batch.label)
Exemplo n.º 59
0
def download_file(url, file_name, debug, socks_proxy, socks_port, timeout):
    process_id = os.getpid()
    try:
        real_size = -1
        partial_dl = 0
        dlded_size = 0
    
        u = open_url(url, socks_proxy, socks_port, timeout, data=None, range_header=None)
        #print("url: %s" % url)
        if not u:
            return -1

        if not file_name:
            file_name = u.info().get_filename()

        if os.path.exists(file_name):
            dlded_size = os.path.getsize(file_name)
        if (dlded_size <= 8192):
            # we may have downloaded an "Exceed the download limit" (Превышение лимита скачивания) page 
            # instead of the song, restart at beginning.
            dlded_size = 0

        i = 0
        while (i < 5):
            try:
                real_size = int(u.info()['content-length'])
                if real_size <= 1024:
                   # we got served an "Exceed the download limit" (Превышение лимита скачивания) page, 
                   # retry without incrementing counter (for musicmp3spb)
                   color_message("** File size too small (<1024), might be an error, please verify manually **", "lightyellow")
                break
            except Exception as e:
                if (i == 4):
                    color_message("** Unable to get the real size of %s from the server because: %s. **" 
                                  % (file_name, str(e)), "lightyellow")
                    break # real_size == -1
                else:
                    i += 1
                    if debug: print("%s problem while getting content-length: %s, retrying" 
                                    % (process_id, str(e)), file=sys.stderr)
                    continue

        # find where to start the file download (continue or start at beginning)
        if (0 < dlded_size < real_size):
            # file incomplete, we need to resume download
            u.close()
            
            range_header = 'bytes=%s-%s' % (dlded_size, real_size - 1)  # byte positions are zero-indexed, last is size-1
            data = None
            u = open_url(url, socks_proxy, socks_port, timeout, data, range_header)
            if not u: return -1
    
            # test if the server supports the Range header
            if (u.getcode() == 206):
                partial_dl = 1
            else:
                color_message("** Range/partial download is not supported by server, restarting download at beginning **", "lightyellow")
                dlded_size = 0
        elif (dlded_size == real_size):
            # file already completed, skipped
            color_message("%s (skipped)" % dl_status(file_name, dlded_size, real_size), "lightgreen")
            u.close()
            return
        elif (dlded_size > real_size):
            # we got a problem, restart download
            color_message("** Downloaded size (%s) bigger than the real size (%s) of %s. Either real size could not be found or an other problem occured, retrying **" % (dlded_size,real_size,file_name), "lightyellow")
            u.close()
            return -1

        # append or truncate
        if partial_dl:
            f = open(file_name, 'ab+')
        else:
            f = open(file_name, 'wb+')

        # get the file
        block_sz = 8192
        #spin = spinning_wheel()
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break

            dlded_size += len(buffer)
            f.write(buffer)

            # show progress
            #sys.stdout.write(next(spin))
            #sys.stdout.flush()
            #time.sleep(0.1)
            #sys.stdout.write('\b')
    
        if (real_size == -1): 
            real_size = dlded_size
            color_message("%s (file downloaded, but could not verify if it is complete)" 
                   % dl_status(file_name, dlded_size, real_size), "lightyellow")
        elif (real_size == dlded_size):
            color_message("%s" # file downloaded and complete
                   % dl_status(file_name, dlded_size, real_size), "lightgreen")
        elif (dlded_size < real_size):
            color_message("%s (file download incomplete, retrying)" 
                   % dl_status(file_name, dlded_size, real_size), "lightyellow")
            u.close()
            f.close()
            return -1

        #sys.stdout.write('\n')
        u.close()
        f.close()
    except KeyboardInterrupt as e:
        if debug: print("** %s : download_file: keyboard interrupt detected **" % process_id, file=sys.stderr)
        raise e
    except Exception as e:
        color_message('** Exception caught in download_file(%s,%s) with error: "%s". We will continue anyway. **' 
               % (url, file_name, str(e)), "lightyellow")
        traceback.print_stack(file=sys.stderr)
        pass
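
A minimal standard-library sketch of the resume logic above (the URL is a placeholder and the server is assumed to honor Range requests; on a 200 response the download simply restarts from scratch):

import os
import urllib.request

def resume_download(url, file_name, block_sz=8192):
    start = os.path.getsize(file_name) if os.path.exists(file_name) else 0
    req = urllib.request.Request(url, headers={'Range': 'bytes=%d-' % start})
    with urllib.request.urlopen(req) as u:
        # 206 means the server honored the partial request; otherwise restart
        mode = 'ab' if u.getcode() == 206 else 'wb'
        with open(file_name, mode) as f:
            while True:
                chunk = u.read(block_sz)
                if not chunk:
                    break
                f.write(chunk)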
Exemplo n.º 60
0
def rangefilter(request, oldresponse):
    if oldresponse.stream is None:
        return oldresponse
    size = oldresponse.stream.length
    if size is None:
        # Does not deal with indeterminate length outputs
        return oldresponse

    oldresponse.headers.setHeader('accept-ranges',('bytes',))
    
    rangespec = request.headers.getHeader('range')
    
    # If we've got a range header and the If-Range header check passes, and
    # the range type is bytes, do a partial response.
    if (rangespec is not None and http.checkIfRange(request, oldresponse) and
        rangespec[0] == 'bytes'):
        # If it's a single range, return a simple response
        if len(rangespec[1]) == 1:
            try:
                start,end = canonicalizeRange(rangespec[1][0], size)
            except UnsatisfiableRangeRequest:
                return makeUnsatisfiable(request, oldresponse)

            response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers)
            response.headers.setHeader('content-range',('bytes',start, end, size))
            
            content, after = makeSegment(oldresponse.stream, 0, start, end)
            after.close()
            response.stream = content
            return response
        else:
            # Return a multipart/byteranges response
            lastOffset = -1
            offsetList = []
            for arange in rangespec[1]:
                try:
                    start,end = canonicalizeRange(arange, size)
                except UnsatisfiableRangeRequest:
                    continue
                if start <= lastOffset:
                    # Stupid client asking for out-of-order or overlapping ranges, PUNT!
                    return oldresponse
                offsetList.append((start,end))
                lastOffset = end

            if not offsetList:
                return makeUnsatisfiable(request, oldresponse)
            
            content_type = oldresponse.headers.getRawHeaders('content-type')
            boundary = "%x%x" % (int(time.time()*1000000), os.getpid())
            response = http.Response(responsecode.PARTIAL_CONTENT, oldresponse.headers)
            
            response.headers.setHeader('content-type',
                http_headers.MimeType('multipart', 'byteranges',
                                      [('boundary', boundary)]))
            response.stream = out = stream.CompoundStream()
            
            
            lastOffset = 0
            origStream = oldresponse.stream

            headerString = "\r\n--%s" % boundary
            if content_type is not None and len(content_type) == 1:
                headerString+='\r\nContent-Type: %s' % content_type[0]
            headerString+="\r\nContent-Range: %s\r\n\r\n"
            
            for start,end in offsetList:
                out.addStream(headerString % 
                    http_headers.generateContentRange(('bytes', start, end, size)))

                content, origStream = makeSegment(origStream, lastOffset, start, end)
                lastOffset = end + 1
                out.addStream(content)
            origStream.close()
            out.addStream("\r\n--%s--\r\n" % boundary)
            return response
    else:
        return oldresponse
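
For reference, the multipart body assembled above comes out on the wire in this shape (boundary value and ranges illustrative):

--15f06b2a4c9
Content-Type: text/html
Content-Range: bytes 0-99/1000

<first 100 bytes of the entity>
--15f06b2a4c9
Content-Type: text/html
Content-Range: bytes 500-599/1000

<bytes 500 through 599>
--15f06b2a4c9--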