Example #1
 def on_release_print(self):
     # This is the only custom method for hist2d_alex()
     E1, E2 = min(self.xs, self.xe), max(self.xs, self.xe)
     S1, S2 = min(self.ys, self.ye), max(self.ys, self.ye)
     self.selection = dict(E1=E1, E2=E2, S1=S1, S2=S2)
     pprint("Selection: \nE1=%.2f, E2=%.2f, S1=%.2f, S2=%.2f\n" %\
             (E1,E2,S1,S2))
Example #2
 def on_press_print(self):
     if self.debug:
         pprint("%s %s %s\n" % (self.xp, self.yp, self.ich))
     mburst = self.d.mburst[self.ich]
     t_clk = self.xp/self.d.clk_p
     mask = (b_start(mburst) < t_clk)*(b_end(mburst) > t_clk)
     if mask.any():
         burst_index = np.where(mask)[0][0]
         ts = b_start(mburst)[burst_index]*self.d.clk_p
         width = b_width(mburst)[burst_index]*self.d.clk_p
         asym = self.asymmetry(self.ich)[burst_index]
         msg = "Burst [%d-CH%d]: " % (burst_index, self.ich+1)
         msg += "t = %7.2f ms" % (ts*1e3)
         msg += "   width=%4.2f ms" % (width*1e3)
         msg += "   size=(T%3d, D%3d, A%3d" % \
             (self.d.nt[self.ich][burst_index],
              self.d.nd[self.ich][burst_index],
              self.d.na[self.ich][burst_index])
         if self.d.ALEX:
             msg += ", AA%3d" % self.d.naa[self.ich][burst_index]
         msg += ")   E=%4.2f" % self.d.E[self.ich][burst_index]
         if self.d.ALEX:
             msg += "   S=%4.2f" % self.d.E[self.ich][burst_index]
         msg += "   Asym(D-A)=%5.2f ms" % asym
         pprint(msg + '\n')
Example #3
    def __init__(self, fig, scroll_step=10, debug=False):
        # Set up data-range variables for scrolling
        self.debug = debug
        if self.debug: pprint('ScrollingToolQT init\n')

        self.fig = fig
        self.scroll_step = scroll_step
        self.xmin, self.xmax = fig.axes[0].get_xlim()
        self.width = 1 # axis units
        self.pos = 0   # axis units
        self.scale = 1e3 # conversion between scrolling units and axis units

        # Some handy shortcuts
        self.ax = self.fig.axes[0]
        self.draw = self.fig.canvas.draw
        #self.draw_idle = self.fig.canvas.draw_idle

        # Retrieve the QMainWindow hosting the current figure and add a
        # toolbar to host the new widgets
        QMainWin = fig.canvas.parent()
        toolbar = QtGui.QToolBar(QMainWin)
        QMainWin.addToolBar(QtCore.Qt.BottomToolBarArea, toolbar)

        # Create the slider and spinbox for x-axis scrolling in toolbar
        self.set_slider(toolbar)
        self.set_spinbox(toolbar)

        # Set the initial x-limits to match the slider and spinbox values
        self.ax.set_xlim(self.pos, self.pos + self.width)
        self.draw()
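
A minimal usage sketch for the scrolling tool above (a sketch, assuming the PyQt4-era backend implied by the QtGui/QtCore calls; the plotted signal is illustrative):

    import numpy as np
    import matplotlib
    matplotlib.use('Qt4Agg')  # the snippet uses the PyQt4 API (QtGui.QToolBar)
    import matplotlib.pyplot as plt

    t = np.arange(0, 3600, 0.1)  # long time axis, in seconds
    fig, ax = plt.subplots()
    ax.plot(t, np.sin(2 * np.pi * 0.01 * t))
    # Keep a reference: the tool owns the Qt slider/spinbox widgets.
    scroll_tool = ScrollingToolQT(fig, scroll_step=10, debug=False)
    plt.show()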
Example #4
 def xwidth_changed(self, width):
     if self.debug: pprint("Width (axis units) %f\n" % width)
     if width <= 0: return
     self.width = width
     self.slider.setSingleStep(self.width*self.scale/5.)
     self.slider.setPageStep(self.scroll_step*self.width*self.scale)
     old_xlim = self.ax.get_xlim()
     self.xpos_changed(old_xlim[0]*self.scale)
Example #5
 def on_motion(self, event):
     if event.inaxes != self.ax: return
     if self.debug:
         pprint('MOTION x=%d, y=%d, xdata=%f, ydata=%f\n' % (
             event.x, event.y, event.xdata, event.ydata))
     self.xe, self.ye = event.xdata, event.ydata
     self.on_motion_draw()
     self.fig.canvas.draw()
Example #6
 def on_release(self, event):
     if not self.pressed: return
     self.pressed = False
     if self.debug:
         pprint('RELEASE button=%d, x=%d, y=%d, xdata=%f, ydata=%f\n' % (
             event.button, event.x, event.y, event.xdata, event.ydata))
     self.fig.canvas.mpl_disconnect(self.id_motion)
     self.on_release_print()
Example #7
 def __init__(self, fig, ax, debug=False):
     self.ax = ax
     self.fig = fig
     self.pressed = False
     self.debug = debug
     self.id_press = fig.canvas.mpl_connect('button_press_event',
                                             self.on_press)
     if self.debug:
         pprint('Figure: ' + str(fig) + '\nAxis: ' + str(ax) + '\n')
Example #8
 def set_spinbox(self, parent):
     if self.debug: pprint('ScrollingToolQT set_spinbox\n')
     self.spinb = QtGui.QDoubleSpinBox(parent=parent)
     self.spinb.setDecimals(3)
     self.spinb.setRange(0.001,3600.)
     self.spinb.setSuffix(" s")
     self.spinb.setValue(self.width)   # set the initial width
     self.spinb.valueChanged.connect(self.xwidth_changed)
     parent.addWidget(self.spinb)
Example #9
 def set_slider(self, parent):
     if self.debug: pprint('ScrollingToolQT set_slider\n')
     self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, parent=parent)
     self.slider.setTickPosition(QtGui.QSlider.TicksAbove)
     self.slider.setTickInterval((self.xmax-self.xmin)/10.*self.scale)
     self.slider.setMinimum(self.xmin*self.scale)
     self.slider.setMaximum((self.xmax-self.width)*self.scale)
     self.slider.setSingleStep(self.width*self.scale/4.)
     self.slider.setPageStep(self.scroll_step*self.width*self.scale)
     self.slider.setValue(self.pos*self.scale) # set the initial position
     self.slider.valueChanged.connect(self.xpos_changed)
     parent.addWidget(self.slider)
Example #10
 def on_press(self, event):
     if event.inaxes != self.ax: return
     self.pressed = True
     self.xs, self.ys = event.xdata, event.ydata
     if self.debug:
         pprint('PRESS button=%d, x=%d, y=%d, xdata=%f, ydata=%f\n' % (
             event.button, event.x, event.y, event.xdata, event.ydata))
     self.on_press_draw()
     self.fig.canvas.draw()
     self.id_motion = self.fig.canvas.mpl_connect('motion_notify_event',
                                                  self.on_motion)
     self.fig.canvas.mpl_connect('button_release_event',
                                          self.on_release)
Example #11
def smart_bg(d, ich=0, bin_=50e-3, step=1):
    """BG calculation through binning (WARNING: very slow!)."""
    bg = []
    t = d.ph_times_m[ich]*d.clk_p
    t_max = np.floor(t.max())
    pprint(" Calculation started:")
    for s in np.arange(step, t_max, step):
        #if (s % (t_max/50) == 0): pprint(" %d %%" % (s/t_max*100))
        h = np.histogram(t[(t<s)*(t>(s-step))],
                bins=np.arange(s-step, s+1e-3, bin_))
        print(h[0])
        bg.append(h[0].min())
    pprint('\n')
    return np.array(bg)/bin_
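
A short usage sketch (assuming `d` is a loaded Data object with per-channel timestamps in `ph_times_m` and the clock period in `clk_p`; parameter values are illustrative):

    bg_rates = smart_bg(d, ich=0, bin_=50e-3, step=1)
    # One value per 1 s window: the minimum 50 ms bin count in that
    # window divided by the bin width, i.e. a rate in counts/s.
    print('Mean background rate: %.1f cps' % bg_rates.mean())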
Example #12
def burst_search_and_gate(dx, F=6, m=10, ph_sel1=Ph_sel(Dex='DAem'),
                          ph_sel2=Ph_sel(Aex='Aem'), mute=False):
    """Return a Data object containing bursts obtained by and-gate burst-search.

    The and-gate burst search is a composition of 2 burst searches performed
    on different photon selections. The bursts in the and-gate burst search
    are the overlapping bursts in the 2 initial burst searches, and their
    duration is the intersection of the two overlapping bursts.

    By default the 2 photon selections are D+A photons during D excitation
    (`Ph_sel(Dex='DAem')`) and A photons during A excitation
    (`Ph_sel(Aex='Aem')`).

    Arguments:
        dx (Data object): contains the data on which to perform the burst
            search. Background estimation must be performed before the search.
        F (float): Burst search parameter F.
        m (int): Burst search parameter m.
        ph_sel1 (Ph_sel object): photon selection used for burst search 1.
        ph_sel2 (Ph_sel object): photon selection used for burst search 2.
        mute (bool): if True nothing is printed. Default: False.

    Return:
        A new `Data` object containing bursts from the and-gate search.

    See also :meth:`fretbursts.burstlib.Data.burst_search_t`.
    """
    dx_d = dx
    dx_a = dx.copy(mute=mute)
    dx_and = dx.copy(mute=mute)

    dx_d.burst_search_t(L=m, m=m, F=F, ph_sel=ph_sel1, mute=mute)
    dx_a.burst_search_t(L=m, m=m, F=F, ph_sel=ph_sel2, mute=mute)

    mburst_and = []
    for mburst_d, mburst_a in zip(dx_d.mburst, dx_a.mburst):
        mburst_and.append(bslib.burst_and(mburst_d, mburst_a))

    dx_and.add(mburst=mburst_and)

    pprint(" - Calculating burst periods ...", mute)
    dx_and._calc_burst_period()                       # writes bp
    pprint("[DONE]\n", mute)

    # Note: dx_and.bg_bs will not be meaningful
    dx_and.add(m=m, L=m, F=F, P=None, ph_sel='AND-gate')
    dx_and.add(bg_corrected=False, leakage_corrected=False,
               dir_ex_corrected=False, dithering=False)

    pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
    dx_and.calc_fret(count_ph=True, corrections=True, mute=mute)
    pprint("   [DONE Counting D/A]\n", mute)

    return dx_and
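
A usage sketch for the and-gate search (assumes `d` is a Data object on which background estimation has already been performed, as required above; the photon selections shown are the signature defaults):

    d_and = burst_search_and_gate(d, F=6, m=10,
                                  ph_sel1=Ph_sel(Dex='DAem'),
                                  ph_sel2=Ph_sel(Aex='Aem'))
    # d_and contains only bursts found in *both* selections; each burst's
    # duration is the intersection of the two overlapping bursts.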
Example #13
def multispot48(fname, leakage=0, gamma=1., reprocess=False,
                i_start=0, i_stop=None, debug=False):
    """Load a 48-ch multispot file and return a Data() object.
    """
    import tables
    basename, ext = os.path.splitext(fname)
    fname_h5 = basename + '.hdf5'
    fname_dat = basename + '.dat'

    def load_dat_file():
        pprint(' - Loading DAT file: %s ... ' % fname_dat)
        ## Load data from the raw file and store it in an HDF5 file
        data = load_xavier_manta_data(fname_dat, i_start=i_start,
                                      i_stop=i_stop, debug=debug)
        pprint('DONE.\n - Extracting timestamps and detectors ... ')
        timestamps, det = get_timestamps_detectors(data, nbits=24)
        pprint('DONE.\n - Processing and storing ... ')
        ph_times_m, big_fifo, ch_fifo = process_store(timestamps, det,
                        out_fname=fname_h5, fifo_flag=True, debug=False)
        pprint('DONE.\n')
        return ph_times_m, big_fifo, ch_fifo

    if not (os.path.isfile(fname_dat) or os.path.isfile(fname_h5)):
        raise IOError('Data file "%s" not found' % basename)

    if os.path.exists(fname_h5) and not reprocess:
        ## There is an HDF5 file
        try:
            pprint(' - Loading HDF5 file: %s ... ' % fname_h5)
            ph_times_m, big_fifo, ch_fifo = \
                        load_manta_timestamps_pytables(fname_h5)
            pprint('DONE.\n')
        except tables.HDF5ExtError:
            pprint('\n  Oops! File may be truncated.\n')
            ph_times_m, big_fifo, ch_fifo = load_dat_file()
    else:
        ph_times_m, big_fifo, ch_fifo = load_dat_file()

    ## Current data has only the acceptor channel
    A_em = [True] * len(ph_times_m)

    dx = Data(fname=fname, clk_p=10e-9, nch=48, leakage=leakage, gamma=gamma)
    dx.add(ph_times_m=ph_times_m, A_em=A_em, ALEX=False,
           data_file=ph_times_m.data_file, bg_data_file=ph_times_m.data_file)
    big_fifo_full = np.array([b[:].any() for b in big_fifo]).any()
    ch_fifo_full = np.array([b[:].any() for b in ch_fifo]).any()
    if big_fifo_full:
        print('WARNING: Big-FIFO full, flags saved in Data()')
        dx.add(big_fifo=big_fifo)
    if ch_fifo_full:
        print('WARNING: CH-FIFO full, flags saved in Data()')
        dx.add(ch_fifo=ch_fifo)
    return dx
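
For illustration, a hedged call (the file name is hypothetical; passing either the `.dat` or `.hdf5` path works because the function splits off the extension):

    d48 = multispot48('manta_measurement.dat', leakage=0.05, reprocess=False)
    # The first call converts the DAT file to HDF5; subsequent calls load
    # the HDF5 file directly unless reprocess=True.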
Example #14
    def on_press(self, event):
        if self.debug:
            pprint('PRESS button=%d, x=%d, y=%d, xdata=%f, ydata=%f\n' % (
                event.button, event.x, event.y, event.xdata, event.ydata))

        iax = [i for i, ax in enumerate(self.ax_list) if ax == event.inaxes]
        if len(iax) == 0:
            if self.debug:
                pprint('NO axis found. event.inaxes "%s".\n' % event.inaxes)
                pprint('self.ax_list: ' + str(self.ax_list))
            return

        self.ich = iax[0]
        self.xp, self.yp = event.xdata, event.ydata
        self.on_press_print()
Example #15
def multispot8(fname, bytes_to_read=-1, swap_D_A=True, leakage=0, gamma=1.):
    """Load a 8-ch multispot file and return a Data() object. Cached version.
    """
    fname_c = fname + '_cache.pickle'
    try:
        var = pickle.load(open(fname_c, 'rb'))
        dx = Data(fname=fname, clk_p=12.5e-9, nch=8, leakage=leakage,
                  gamma=gamma)
        dx.add(ph_times_m=var['ph_times_m'], A_em=var['A_em'], ALEX=False)
        pprint(" - File loaded from cache: %s\n" % fname)
    except IOError:
        dx = multispot8_core(fname, bytes_to_read=bytes_to_read,
                             swap_D_A=swap_D_A, leakage=leakage, gamma=gamma)
        D = {'ph_times_m': dx.ph_times_m, 'A_em': dx.A_em}
        pprint(" - Pickling data ... ")
        pickle.dump(D, open(fname_c, 'wb'), -1)
        pprint("DONE\n")
    return dx
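
A usage sketch (file name hypothetical). The pickle cache is keyed only on the file name, so the `*_cache.pickle` file must be deleted by hand if loading parameters such as `swap_D_A` change:

    d8 = multispot8('8spot_measurement.dat', swap_D_A=True, leakage=0.04)
    # A second call loads '8spot_measurement.dat_cache.pickle' instead of
    # re-parsing the raw file.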
Example #16
                        default='./ckpts/miniImageNet/netFeatBest.pth')
    parser.add_argument('--base_lr_sib', type=float, default=0.001)
    parser.add_argument('--sib_lr_mode',
                        type=str,
                        default='EBL',
                        choices=['HPL', 'EBL'])
    parser.add_argument('--phase_sib',
                        type=str,
                        default='meta_train',
                        choices=['meta_train', 'meta_eval'])
    parser.add_argument('--meta_eval_load_path',
                        type=str,
                        default='./ckpts/miniImageNet/e3bm_ckpt.pth')

    args = parser.parse_args()
    pprint(vars(args))
    print('Experiment label: ' + args.label)
    set_gpu(args.gpu)

    occupy_memory(args.gpu)
    print('Occupy GPU memory in advance.')

    if args.baseline == 'MTL':
        if args.seed == 0:
            torch.backends.cudnn.benchmark = True
        else:
            torch.manual_seed(args.seed)
            torch.cuda.manual_seed(args.seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
Example #17
    def __init__(self, args):
        param = configs.__dict__[args.config]()
        args.shot = param.shot
        args.test = param.test
        args.debug = param.debug
        args.deconfound = param.deconfound
        args.meta_label = param.meta_label
        args.init_weights = param.init_weights
        self.test_iter = param.test_iter
        args.param = param
        pprint(vars(args))

        # Set the folder to save the records and checkpoints
        log_base_dir = '/data2/yuezhongqi/Model/mtl/logs/'
        if not osp.exists(log_base_dir):
            os.mkdir(log_base_dir)
        meta_base_dir = osp.join(log_base_dir, 'meta')
        if not osp.exists(meta_base_dir):
            os.mkdir(meta_base_dir)
        save_path1 = '_'.join([args.dataset, args.model_type, 'MTL'])
        save_path2 = 'shot' + str(args.shot) + '_way' + str(args.way) + '_query' + str(args.train_query) + \
            '_step' + str(args.step_size) + '_gamma' + str(args.gamma) + '_lr1' + str(args.meta_lr1) + '_lr2' + str(args.meta_lr2) + \
            '_batch' + str(args.num_batch) + '_maxepoch' + str(args.max_epoch) + \
            '_baselr' + str(args.base_lr) + '_updatestep' + str(args.update_step) + \
            '_stepsize' + str(args.step_size) + '_' + args.meta_label
        args.save_path = meta_base_dir + '/' + save_path1 + '_' + save_path2
        ensure_path(args.save_path)

        # Set args to be shareable in the class
        self.args = args

        # Load meta-train set
        self.trainset = Dataset('train',
                                self.args,
                                dataset=self.args.param.dataset,
                                train_aug=False)
        num_workers = 8
        if args.debug:
            num_workers = 0
        self.train_sampler = CategoriesSampler(
            self.trainset.label, self.args.num_batch, self.args.way,
            self.args.shot + self.args.train_query)
        self.train_loader = DataLoader(dataset=self.trainset,
                                       batch_sampler=self.train_sampler,
                                       num_workers=num_workers,
                                       pin_memory=True)

        # Load meta-val set
        self.valset = Dataset('val',
                              self.args,
                              dataset=self.args.param.dataset,
                              train_aug=False)
        self.val_sampler = CategoriesSampler(
            self.valset.label, self.test_iter, self.args.way,
            self.args.shot + self.args.val_query)
        self.val_loader = DataLoader(dataset=self.valset,
                                     batch_sampler=self.val_sampler,
                                     num_workers=num_workers,
                                     pin_memory=True)

        # Build meta-transfer learning model
        self.model = MtlLearner(self.args)

        # load pretrained model without FC classifier
        self.model.load_pretrain_weight(self.args.init_weights)
        '''
        self.model_dict = self.model.state_dict()
        if self.args.init_weights is not None:
            pretrained_dict = torch.load(self.args.init_weights)['params']
        else:
            pre_base_dir = osp.join(log_base_dir, 'pre')
            pre_save_path1 = '_'.join([args.dataset, args.model_type])
            pre_save_path2 = 'batchsize' + str(args.pre_batch_size) + '_lr' + str(args.pre_lr) + '_gamma' + str(args.pre_gamma) + '_step' + \
                str(args.pre_step_size) + '_maxepoch' + str(args.pre_max_epoch)
            pre_save_path = pre_base_dir + '/' + pre_save_path1 + '_' + pre_save_path2
            pretrained_dict = torch.load(osp.join(pre_save_path, 'max_acc.pth'))['params']
        pretrained_dict = {'encoder.'+k: v for k, v in pretrained_dict.items()}
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in self.model_dict}
        print(pretrained_dict.keys())
        self.model_dict.update(pretrained_dict)
        self.model.load_state_dict(self.model_dict)
        '''

        # Set model to GPU
        if torch.cuda.is_available():
            torch.backends.cudnn.benchmark = True
            self.model = self.model.cuda()
            if self.args.param.model == "wideres":
                print("Using Parallel")
                self.model.encoder = torch.nn.DataParallel(
                    self.model.encoder).cuda()

        # Set optimizer
        self.optimizer = torch.optim.Adam(
            [{
                'params':
                filter(lambda p: p.requires_grad,
                       self.model.encoder.parameters())
            }, {
                'params': self.model.base_learner.parameters(),
                'lr': self.args.meta_lr2
            }],
            lr=self.args.meta_lr1)
        # Set learning rate scheduler
        self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=self.args.step_size,
            gamma=self.args.gamma)

        if not self.args.deconfound:
            self.criterion = torch.nn.CrossEntropyLoss().cuda()
        else:
            self.criterion = torch.nn.NLLLoss().cuda()

        # Enable evaluation with Cross
        if args.cross:
            args.param.dataset = "cross"
Example #18
def store(d, compression=dict(complevel=6, complib='zlib'), h5_fname=None,
          verbose=True):
    """
    Saves the `Data` object `d` in the HDF5-Ph-Data format.

    As a side effect the `d` object is modified by adding the attribute
    `data_file` that contains a reference to the pytables file.

    Arguments:
        d (Data object): the Data object containing the smFRET measurement.
        compression (dict): a dictionary containing the compression type
            and level. Passed to pytables `tables.Filters()`.
        h5_fname (string or None): if not None, contains the file name
            to be used for the HDF5 file. If None, the file name is generated
            from `d.fname`, by replacing the original extension with '.hdf5'.
        verbose (bool): if True prints the name of the saved file.

    For description and specs of the HDF5-Ph-Data format see:
    https://github.com/tritemio/FRETBursts/wiki/HDF5-Ph-Data-format-0.2-Draft
    """
    comp_filter = tables.Filters(**compression)
    if 'lifetime' not in d:
        # Test on different fields for ALEX and non-ALEX
        d.add(lifetime=('nanotimes_t' in d) or ('nanotimes' in d))

    if h5_fname is None:
        basename, extension = os.path.splitext(d.fname)
        h5_fname = basename + '.hdf5'

    if os.path.exists(h5_fname):
        basename, extension = os.path.splitext(h5_fname)
        h5_fname = basename + '_new_copy.hdf5'

    pprint('Saving: %s' % h5_fname, not verbose)
    data_file = tables.open_file(h5_fname, mode="w",
                                 title="Confocal smFRET data")
    writer = H5Writer(data_file, d, comp_filter)

    ## Save the root-node metadata
    for name, value in _format_meta.items():
        data_file.root._f_setattr(name, value)

    ## Save the mandatory parameters
    mandatory_fields = ['timestamps_unit', 'num_spots', 'alex', 'lifetime']
    for field in mandatory_fields:
        writer.add_array('/', field)

    if d.ALEX:
        writer.add_array('/', 'alex_period')
        writer.add_array('/', 'alex_period_donor')
        writer.add_array('/', 'alex_period_acceptor')

    ## Save the photon-data
    if d.nch == 1:
        # Single-spot: using "basic layout"
        ph_group = writer.add_group('/', 'photon_data')

        if d.ALEX:
            for field in ['timestamps', 'detectors']:
                writer.add_carray(ph_group, field)
            donor, accept = d.det_donor_accept
        else:
            writer.add_carray(ph_group, 'timestamps', obj=d.ph_times_m[0])
            writer.add_carray(ph_group, 'detectors', obj=d.A_em[0])
            donor, accept = 0, 1

        det_group = writer.add_group(ph_group, 'detectors_specs')
        writer.add_array(det_group, 'donor', obj=donor)
        writer.add_array(det_group, 'acceptor', obj=accept)

        # If present save nanotime data
        if d.lifetime:
            if d.ALEX:
                writer.add_carray(ph_group, 'nanotimes', obj=d.nanotimes_t)
            else:
                writer.add_carray(ph_group, 'nanotimes')
            nt_group = writer.add_group(ph_group, 'nanotimes_specs')

            # Mandatory specs
            nanotimes_specs = ['tcspc_bin', 'tcspc_nbins', 'tcspc_range']
            for spec in nanotimes_specs:
                writer.add_array(nt_group, spec, obj=d.nanotimes_params[spec])

            # Optional specs
            nanotimes_specs = ['tau_accept_only', 'tau_donor_only',
                               'tau_fret_donor', 'tau_fret_trans']
            for spec in nanotimes_specs:
                if spec in d.nanotimes_params:
                    writer.add_array(nt_group, spec,
                                     obj=d.nanotimes_params[spec])

        if 'par' in d:
            writer.add_carray(ph_group, 'particles', obj=d.par[0])

    else:
        # Multi-spot: using "multi-spot layout"
        for ich, ph in enumerate(d.iter_ph_times()):
            ch_group = writer.add_group('/', 'photon_data_%d' % ich,
                                        metakey='photon_data')

            writer.add_carray(ch_group, 'timestamps', obj=ph)

            # If A_em[ich] is a slice, the channel has a single color, so
            # we don't save detectors (there is only one detector per channel).
            a_em = d.A_em[ich]
            if type(a_em) is not slice:
                writer.add_carray(ch_group, 'detectors', obj=a_em)
                # Detector specs
                det_group = writer.add_group(ch_group, 'detectors_specs')
                writer.add_array(det_group, 'donor', obj=False)
                writer.add_array(det_group, 'acceptor', obj=True)

    data_file.flush()
    d.add(data_file=data_file)
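
A minimal usage sketch (assumes `d` is a fully populated Data object; the compression settings shown are the function defaults):

    store(d, compression=dict(complevel=6, complib='zlib'), verbose=True)
    # store() leaves the HDF5 file open and attaches it to `d` as
    # `d.data_file`; close it explicitly when done.
    d.data_file.close()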
Example #19
 def on_release_print(self):
     pprint('X Span: (%d, %d)\n' % (self.xs, self.xe))
Example #20
 def xpos_changed(self, pos):
     if self.debug: pprint("Position (in scroll units) %f\n" %pos)
     pos /= self.scale
     self.ax.set_xlim(pos, pos+self.width)
     self.draw()