Example No. 1
    def _do_process_fired(self):
        def stop_process():
            progress.update(max_t)
            self.is_processing = False
            progress.close()

        self.is_processing = True
        self.init()
        max_t = len(self.filenames)
        progress = ProgressDialog(title="progress",
                                  message="Processing... ",
                                  max=max_t,
                                  show_time=True,
                                  can_cancel=True)
        progress.open()
        try:
            for i, image in enumerate(self):
                (cont, skip) = progress.update(i)
                self.process(image, i)
                if not cont or skip:
                    break
        except Exception as e:
            self.error = error_to_str(e)
            raise  # re-raise with the original traceback
        finally:
            stop_process()

        self.post_process()
Example No. 2
    def run(self, progressbar=True):
        consts_c, vars_c = self.get_constraints()
        type_p, consts_p, vars_p = self.get_parameters()
        # confirm that we are in a good state
        avl = self.runcase.avl
        avl.sendline()
        avl.expect(AVL.patterns['/'])
        avl.sendline('oper')
        avl.expect(AVL.patterns['/oper'])
        # first set the constants once and for all
        # constraints
        for c, v in consts_c.iteritems():
            avl.sendline(c.format(eval(v)))
        # parameters
        avl.sendline(type_p)
        for p, v in consts_p.iteritems():
            avl.sendline(p.format(eval(v)))
        avl.sendline()
        avl.sendline()
        avl.expect(AVL.patterns['/'])

        outs, modes, matrices = [], [], []
        # now run the case and get output while changing the vars each time
        if progressbar:
            progress = ProgressDialog(title="progress",
                                      message="calculating...",
                                      max=self.runcase_config.x.shape[0],
                                      show_time=True,
                                      can_cancel=True)
            try:
                progress.open()
            except Exception, e:
                logger.warning(e)
Example No. 3
def task_func(t):
    progress = ProgressDialog(title="progress",
                              message="counting to %d" % t,
                              max=t,
                              show_time=True,
                              can_cancel=True)
    progress.open()

    for i in range(0, t + 1):
        time.sleep(1)
        print i
        (cont, skip) = progress.update(i)
        if not cont or skip:
            break

    progress.update(t)
Example No. 4
def parse_ftrace(filename, callback):
    fid = ftrace_open(filename)
    progress = ProgressDialog(title="ftrace",
                              message="loading %s..." % os.path.basename(filename),
                              max=100,
                              show_time=True,
                              can_cancel=True)
    progress.open()
    try:
        fid.seek(0, 2)
    except ValueError:
        # gzip files do not support seeking to the end,
        # so we read through the whole stream to find its size. :-/
        # parsing is already far slower than uncompressing.
        while fid.read(1024):
            pass
    totsize = fid.tell()
    fid.seek(0, 0)
    last_percent = 0
    # the base regular expressions:
    #   "comm-pid  [cpu]  timestamp: event_name: event_args" for generic events
    #   "comm-pid  [cpu]  timestamp: callee <-caller"        for function tracing
    event_re = re.compile(
        r'\s*(.+)-([0-9]+)\s+\[([0-9]+)\][^:]*\s+([0-9.]+): ([^:]*): (.*)')
    function_re = re.compile(
        r'\s*(.+)-([0-9]+)\s+\[([0-9]+)\][^:]*\s+([0-9.]+): (.*) <-(.*)')
    last_timestamp = 0
    linenumber = 0
    for line in fid:
        percent = int(fid.tell()*100./totsize)
        if percent != last_percent:
            last_percent = percent
            (cont, skip) = progress.update(percent)
            if not cont or skip:
                break
        linenumber += 1
        line = line.rstrip()
        res = event_re.match(line)
        if res:
            groups = res.groups()
            event_name = groups[4]
            event = {
                'linenumber': linenumber,
                'common_comm' : groups[0],
                'common_pid' :  int(groups[1]),
                'common_cpu' : int(groups[2]),
                'timestamp' : int(float(groups[3])*1000000),
                'event' : event_name,
                'event_arg' : groups[5]
                }
            last_timestamp = event['timestamp']
            to_match = event['event_arg']
            try:
                for name,regex,func in events_re[event_name]:
                    res = regex.search(to_match)
                    if res:
                        func(event,res.groups())
            except KeyError:
                pass
            callback(Event(event))
            continue

        res = function_re.match(line)
        if res:
            event = {
                'linenumber': linenumber,
                'common_comm' : res.group(1),
                'common_pid' :  int(res.group(2)),
                'common_cpu' : int(res.group(3)),
                'timestamp' : int(float(res.group(4))*1000000),
                'event':'function',
                'callee' : res.group(5),
                'caller' : res.group(6)
                }
            callback(Event(event))
            continue
    # make sure the dialog is dismissed even when the user cancels early
    progress.close()
    fid.close()
Example No. 5
    def finish_parsing(self):
        # put the generated data into fixed-size numpy arrays
        c_states = []
        i = 0
        for tc in self.tmp_c_states:
            t = tcIdleState(name='cpu%d' % (i))
            while len(tc['start_ts']) > len(tc['end_ts']):
                tc['end_ts'].append(tc['start_ts'][-1])
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            c_states.append(t)
            i += 1
        self.c_states = c_states
        i = 0
        p_states = []
        for tc in self.tmp_p_states:
            t = tcFrequencyState(name='cpu%d' % (i))
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            i += 1
            p_states.append(t)
        self.wake_events = numpy.array(self.wake_events,
                                       dtype=[('waker', tuple),
                                              ('wakee', tuple),
                                              ('time', 'uint64')])
        self.p_states = p_states
        processes = []
        last_ts = 0
        for pid, comm in self.tmp_process:
            tc = self.tmp_process[pid, comm]
            if len(tc['end_ts']) > 0 and last_ts < tc['end_ts'][-1]:
                last_ts = tc['end_ts'][-1]
        if len(self.tmp_process) > 0:
            progress = ProgressDialog(title="precomputing data",
                                      message="precomputing overview data...",
                                      max=len(self.tmp_process),
                                      show_time=False,
                                      can_cancel=False)
            progress.open()
        i = 0
        for pid, comm in self.tmp_process:
            tc = self.tmp_process[pid, comm]
            if tc['type'] in self.process_types:
                klass, order = self.process_types[tc['type']]
                t = klass(pid=pid, comm=tc['comm'], project=self)
            else:
                t = tcProcess(pid=pid, comm=comm, project=self)
            while len(tc['start_ts']) > len(tc['end_ts']):
                tc['end_ts'].append(last_ts)
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            t.cpus = numpy.array(tc['cpus'])
            t.comments = tc['comments']  #numpy.array(tc['comments'])
            t.process_type = tc["type"]
            # precompute 16 levels of overview cache
            t.get_overview_ts(1 << 16)
            processes.append(t)
            progress.update(i)
            i += 1
        if len(self.tmp_process) > 0:
            progress.close()
            self.tmp_process = []

        def cmp_process(x, y):
            # sort process by type, pid, comm
            def type_index(t):
                try:
                    return self.process_types[t][1]
                except KeyError:
                    # unknown process types sort after the known ones
                    return len(order) + 1

            c = cmp(type_index(x.process_type), type_index(y.process_type))
            if c != 0:
                return c
            c = cmp(x.pid, y.pid)
            if c != 0:
                return c
            c = cmp(x.comm, y.comm)
            return c

        processes.sort(cmp_process)
        self.processes = processes
        self.p_states = p_states
        self.tmp_c_states = []
        self.tmp_p_states = []
        self.tmp_process = {}
Example No. 6
    def read_data(self):
        """ Obtain x_locations, y_locations, data_locations, traces in a context

            Returns:
            ---------
            context: DataContext

        """

        # Check if the filename is valid for reading data
        if not self.file_handle:
            return None

        # Set the file reader at the first char.
        if self.file_handle.closed:
            self.file_handle = open(self.filename, 'rb')

        # Setup a progress dialog
        progress = ProgressDialog(title='Reading Segy Files',
                                  message='Reading Segy Files',
                                  max=100, show_time=True, can_cancel=True)
        progress.open()

        # Skip the card_image_header and binary header
        self.file_handle.seek(Segy.CARD_IMAGE_HEADER_LEN +
                              Segy.BINARY_HEADER_LEN)
        progress.update(1)

        # Check if data lengths are correct.
        x_data_len = struct.calcsize(self.x_format)
        y_data_len = struct.calcsize(self.y_format)
        inline_data_len = struct.calcsize(self.inline_format)
        crossline_data_len = struct.calcsize(self.crossline_format)

        if not (x_data_len == y_data_len and
                y_data_len == inline_data_len and
                inline_data_len == crossline_data_len):
            logger.error('SegyReader: Mismatch in format lengths')
            return None

        if self.scale_format != '':
            scale_data_len = struct.calcsize(self.scale_format)
            if scale_data_len != x_data_len:
                logger.error('SegyReader: Mismatch in format lengths')
                return None

        # Get trace header data of 240 bytes.
        header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)
        traces, read_error = [], False
        previous_update = 1
        while header_data != '' and not read_error:
            trace = self._read_trace(header_data, x_data_len)
            if trace is None:
                logger.error('SegyReader: Error in reading a trace')
                read_error = True
            else:
                traces.append(trace)
                header_data = self.file_handle.read(Segy.TRACE_HEADER_LEN)

            progress_pc = 1 + int(98.0*float(len(traces))/
                                  float(self.trace_count))
            if progress_pc - previous_update > 1:
                cont_val, skip_val = progress.update(progress_pc)
                previous_update = progress_pc

                # If the user has cancelled the action then stop the import
                # immediately
                if skip_val or not cont_val:
                    del traces
                    self.file_handle.close()
                    return None

        self.file_handle.close()
        progress.update(100)

        if read_error:
            del traces
            return None
        else:
            arr_descriptor = {'names': ('x','y','inline','crossline',
                                        'scale_factor', 'trace'),
                              'formats': ('f4', 'f4', 'f4', 'f4', 'f4',
                                          str(self.samples_per_trace)+'f4')
                              }
            traces = array(traces, dtype=arr_descriptor)
            filesplit = os.path.split(self.filename)
            name = str(os.path.splitext(filesplit[1])[0]).translate(trans_table)
            return DataContext(
                name=name,
                _bindings={'traces':traces['trace'],
                           'x_locations':traces['x'],
                           'y_locations':traces['y'],
                           'inline_values':traces['inline'],
                           'crossline_values':traces['crossline'],
                           'scale_factors':traces['scale_factor']})
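Distilled from the examples above, here is a minimal sketch of the open/update/close pattern they all share. It assumes pyface's ProgressDialog (imported from pyface.api) and uses an artificial counting task, so treat it as an illustration rather than code taken from any of the projects shown.

import time

from pyface.api import ProgressDialog


def count_with_progress(total):
    # build the dialog the same way the examples above do
    progress = ProgressDialog(title="progress",
                              message="counting to %d" % total,
                              max=total,
                              show_time=True,
                              can_cancel=True)
    progress.open()
    try:
        for i in range(total + 1):
            time.sleep(0.1)  # stand-in for real work
            # update() returns (continue, skip); stop if the user cancelled
            cont, skip = progress.update(i)
            if not cont or skip:
                break
    finally:
        # drive the bar to its maximum and dismiss the dialog
        progress.update(total)
        progress.close()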