Example #1
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.nextEnabled = False

        title = QLabel("Programming SmartDrive Bluetooth")
        self.progressBar = ProgressBar()
        self.startButton = QPushButton("Start")
        self.startButton.clicked.connect(self.onStart)
        self.startButton.show()
        self.stopButton = QPushButton("Stop")
        self.stopButton.clicked.connect(self.onStop)
        self.stopButton.hide()

        label = QLabel("Will now automatically program SmartDrive Bluetooth.")
        label.setWordWrap(True)

        self.sn = QLabel("S/N: ")

        self.lk = QLabel("License: ")

        self.addr = QLabel("Address: ")

        self.labels = [
            title, label, self.sn, self.lk, self.addr, self.progressBar,
            self.startButton, self.stopButton
        ]

        self.layout.addWidget(title)
        self.layout.addWidget(label)
        self.layout.addWidget(self.sn)
        self.layout.addWidget(self.lk)
        self.layout.addWidget(self.addr)
        self.layout.addWidget(self.progressBar)
        self.layout.addWidget(self.startButton)
        self.layout.addWidget(self.stopButton)
Example #2
def partial_plot(clf,
                 X_,
                 x_name,
                 labels,
                 n_points=100,
                 lims=None,
                 n_samp=1000,
                 categorical=False):
    X = X_.copy()
    N = len(X)
    if lims is None:
        x_min = X[x_name].min()
        x_max = X[x_name].max()
    else:
        x_min = lims[0]
        x_max = lims[1]
    if categorical:
        x = np.array([x_min, x_max] * int(n_points / 2.))
    else:
        x = np.linspace(x_min, x_max, n_points)
    p = []
    pb = ProgressBar()
    for i, x_i in enumerate(x):
        X[x_name] = [x_i] * N
        _idx = np.random.randint(
            N, size=n_samp)  # subsample to reduce evaluation time
        p.append(
            clf.predict_proba(X.values[_idx], labels=labels[_idx])[1].mean(0))
        pb.update_progress(i / n_points)
    return x, np.array(p)
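A minimal usage sketch for partial_plot; clf (a fitted model whose predict_proba accepts the labels keyword used above), the DataFrame X, the label array y, and the column name 'age' are all illustrative assumptions:

import matplotlib.pyplot as plt

x, p = partial_plot(clf, X, 'age', labels=y, n_points=50)
plt.plot(x, p)
plt.xlabel('age')
plt.ylabel('mean predicted probability')
plt.show()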
Example #3
def run_burnin(sampler, startPos, nSteps, storechain=False):
    iStep = 0
    bar = ProgressBar()
    for pos, prob, state in sampler.sample(startPos,
                                           iterations=nSteps,
                                           storechain=storechain):
        bar.render(int(100 * iStep / nSteps), 'running Burn In')
        iStep += 1
    return pos, prob, state
Example #4
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.nextEnabled = False

        title = QLabel("Programming Bootloader")
        switchesLabel = QLabel(
            "Set the MX2+ DIP switches for bootloader programming as shown below.\nThen power-cycle the SmartDrive."
        )
        switchesLabel.setWordWrap(True)
        self.pixMap = QtGui.QPixmap(
            resource.path('images/bootloaderProgramming.jpg'))

        self.progressBar = ProgressBar()
        self.startButton = QPushButton("Start")
        self.startButton.clicked.connect(self.onStart)
        self.startButton.show()
        self.stopButton = QPushButton("Stop")
        self.stopButton.clicked.connect(self.onStop)
        self.stopButton.hide()

        self.labels = [
            title, switchesLabel, self.progressBar, self.startButton,
            self.stopButton
        ]

        self.picture = QLabel(self)
        self.picture.setPixmap(
            self.pixMap.scaled(self.getPictureSize(), Qt.KeepAspectRatio))

        self.layout.addWidget(title)
        self.layout.addWidget(switchesLabel)
        self.layout.addWidget(self.picture)
        self.layout.addWidget(self.progressBar)
        self.layout.addWidget(self.startButton)
        self.layout.addWidget(self.stopButton)
Example #5
def colormap(hm, gradient, smoothness):
    width, height = hm.size

    img = Image.new('RGBA', (width, height), color=(0, 0, 0, 0))

    colorlist = []

    for g in range(len(gradient) - 1):
        (start, color) = gradient[g]
        (nextStart, nextColor) = gradient[g + 1]

        # number of interpolated colour steps between the two gradient stops
        end = (nextStart - start) * smoothness

        sublist = list(Color(color).range_to(Color(nextColor), end))
        colorlist += sublist

    prog = ProgressBar(height)

    print("\nColor Map:")

    for y in range(0, height):
        prog.show(y)
        for x in range(0, width):
            value = hm.getpixel((x, y))
            if value != -2147483648:
                setpixel(x, y, value, colorlist, img)
    return img
Example #6
    def render(self, scene):
        """ Render image from stuff inside scene """
        width = scene.width
        height = scene.height
        ar = width / height

        camera = scene.camera
        im = Image(width, height)

        xmin = -1
        xmax = 1
        ymax = xmax / ar
        ymin = -ymax
        dx = (xmax - xmin) / (width - 1)
        dy = (ymax - ymin) / (height - 1)

        pb = ProgressBar(height * width)

        for j in range(height):
            y = ymin + dy * j
            for i in range(width):
                x = xmin + dx * i

                ray = Ray(camera, Vector(x, y, 0) - camera)
                c = self.ray_trace(ray, scene)
                im.set_pixel(j, i, c)
                pb.update(1)
        return im
Example #7
def gen_var_result_scores(clf, X_train, X_test, y_train=None,
                              n_rounds=100, verbose=False, fit=True):
    """
    Given classifier and training data, return variable importances on the
    test data on a per-sample basis.

    Result scores represent the difference between the observed score and
    the mean score obtained if the specific variable is resampled from the
    training data randomly.
    """
    if fit:
        if verbose:
            print 'Training model...'
            sys.stdout.flush()
            t0 = time.time()
        clf.fit(X_train, y_train)
        if verbose:
            t1 = time.time()
            print 'Training took %.2f seconds' % (t1 - t0)
    real_scores = clf.predict_proba(X_test)[:, 1]
    result_scores = np.zeros(X_test.shape)
    if verbose:
        pb = ProgressBar()
        progress = 0
    for var in range(X_train.shape[1]):
        single_var_scores = np.zeros([X_test.shape[0], n_rounds])
        X_test_mod = np.copy(X_test)
        for j in range(n_rounds):
            if verbose:
                progress += 1
                pb.update_progress(progress / float(n_rounds * X_train.shape[1]))
            X_test_mod[:, var] = np.random.choice(X_train[:, var], X_test.shape[0], replace=True)
            single_var_scores[:, j] = clf.predict_proba(X_test_mod)[:, 1]
        result_scores[:, var] = np.abs(real_scores - np.mean(single_var_scores, axis=1))
    return result_scores
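A hedged sketch of calling gen_var_result_scores; the scikit-learn classifier and the train/test arrays are assumptions:

from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(n_estimators=100)
scores = gen_var_result_scores(clf, X_train, X_test, y_train, n_rounds=50)
# scores has shape (n_test_samples, n_features); a larger value means the
# prediction for that sample is more sensitive to resampling that feature
most_influential = scores.argmax(axis=1)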
Example #8
    def cal_rows(self, css, scene, h, dh):
        width = scene.width
        height = scene.height
        ar = width / height
        camera = scene.camera
        xmin = -1
        xmax = 1
        ymax = xmax / ar
        ymin = -ymax
        dx = (xmax - xmin) / (width - 1)
        dy = (ymax - ymin) / (height - 1)
        cs = []  # colour components: 3 * width * dh floats
        pb = ProgressBar(dh * width)

        for j in range(h, h + dh):
            y = ymin + dy * j
            for i in range(width):
                x = xmin + dx * i
                ray = Ray(camera, Vector(x, y, 0) - camera)
                c = self.ray_trace(ray, scene)
                cs.append(c.x)
                cs.append(c.y)
                cs.append(c.z)
                pb.update(1)

        a = h * width * 3
        b = (h + dh) * width * 3
        css[a:b] = cs
Example #9
def font_file_cracker():
    try:
        file = request.files.get('font_file')
        type_ = request.form.get('type')
    except Exception as _e:
        return jsonify({'code': 400, 'msg': f'lose args,{_e}', 'res': {}})

    filename = re.sub('[(()) ]', '', file.filename)
    if not os.path.exists('./font_collection'):
        os.mkdir('./font_collection')

    file.save('./font_collection/' + filename)

    if config.is_online and not check_file('./font_collection/' + filename):
        return jsonify({'code': 300, 'msg': 'Please use example file(*^_^*)'})

    ProgressBar.init()

    res = ocr_processor('./font_collection/' + filename)

    if type_ == 'html':
        font_dict = {}
        for foo in res:
            font_dict[foo['name']] = foo['ocr_result']

        return jsonify({
            'code': 200,
            'html': render_template('images.html', result=res),
            'font_dict': font_dict
        })
    else:
        return jsonify({'code': 200, 'msg': 'success', 'res': res})
Example #10
def resample_lts( lts, out_lts, new_rate = 5000, cutoff = 2000 ):
	"""
	Resample a LabeledTimeseries to an output LabeledTimeseries.

	Returns a LabeledTimeseries containing the new resampled data.

	Keyword Arguments:
	lts -- An input LabeledTimeseries to resample
	out_lts -- A LabeledTimeseries to output the resampled data to.  If this
		is a string, create a new sibling LabeledTimeseries with this name.
	new_rate -- The sampling rate to resample to. (Default: 5000 samples per sec)
	cutoff -- The cutoff value for the FIR low-pass filter.  Ensure this is set
		considerably *below* the nyquist of 'new_rate'. (Default: 2000 Hz)
	"""
	from h5eeg import LabeledTimeseries

	# Create the new LabeledTimeseries if necessary
	if isinstance( out_lts, basestring ):
		parent_group = lts.dataset.parent
		new_length = int( len( lts ) * ( float( new_rate ) / float( lts.get_rate() ) ) )
		out_lts = LabeledTimeseries.create( parent_group, new_length, 
			lts.get_labels(), new_rate, name = out_lts )

	# Resample lts data channel by channel
	pbar = ProgressBar( len( lts.get_labels() ) )
	for idx, ch_name in enumerate( lts.get_labels() ):
		print 'Resampling Channel: %s...' % ch_name
		ch_data = resample( lts[ :, ch_name ], lts.get_rate(), new_rate, cutoff )
		convert_length = min( len( ch_data ), len( out_lts ) )
		out_lts[ 0:convert_length, ch_name ] = ch_data[ 0:convert_length ]
		pbar.animate( idx )

	# lts.dataset.file.flush() # This takes forever.  Don't bother.
	return out_lts
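A usage sketch, assuming lts is an existing h5eeg LabeledTimeseries recorded at a higher rate; passing a string for out_lts creates a sibling dataset with that name:

resampled = resample_lts(lts, 'raw_5k', new_rate=5000, cutoff=2000)
# resampled.get_rate() == 5000; its length is scaled by new_rate / lts.get_rate()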
Example #11
def run_mcmc_save(sampler, startPos, nSteps, rState, file, **kwargs):
    '''runs an MCMC chain with emcee and saves steps to a file'''
    # open chain save file
    if file:
        f = open(file, "w")
        f.close()
    iStep = 0
    bar = ProgressBar()
    for pos, prob, state in sampler.sample(startPos,
                                           iterations=nSteps,
                                           rstate0=rState,
                                           storechain=True,
                                           **kwargs):
        if file:
            f = open(file, "a")
        bar.render(int(100 * iStep / nSteps), 'running MCMC')
        iStep += 1
        for k in range(pos.shape[0]):
            # loop over all walkers and append to file
            thisPos = pos[k]
            thisProb = prob[k]
            if file:
                f.write("{0:4d} {1:s} {2:f}\n".format(
                    k, " ".join(map(str, thisPos)), thisProb))
        if file:
            f.close()
    return sampler
Example #12
def run_ptmcmc_save(sampler, startPos, nSteps, file, **kwargs):
    '''runs PT MCMC and saves zero temperature chain to file'''
    if not os.path.exists(file):
        f = open(file, "w")
        f.close()

    iStep = 0
    bar = ProgressBar()
    for pos, prob, like in sampler.sample(startPos,
                                          iterations=nSteps,
                                          storechain=True,
                                          **kwargs):
        bar.render(int(100 * iStep / nSteps), 'running MCMC')
        iStep += 1
        f = open(file, "a")
        # pos is shape (ntemps, nwalkers, npars)
        # prob is shape (ntemps, nwalkers)
        # loop over all walkers for zero temp and append to file
        zpos = pos[0, ...]
        zprob = prob[0, ...]
        for k in range(zpos.shape[0]):
            thisPos = zpos[k]
            thisProb = zprob[k]
            f.write("{0:4d} {1:s} {2:f}\n".format(k,
                                                  " ".join(map(str, thisPos)),
                                                  thisProb))
    f.close()
    return sampler
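Each saved line holds the walker index, the parameter vector, and the probability, so the chain file can be read back with numpy (a sketch; the file name is a placeholder):

import numpy as np

chain = np.loadtxt('chain.txt')
walkers = chain[:, 0].astype(int)  # column 0: walker index
params = chain[:, 1:-1]            # middle columns: parameter vector
prob = chain[:, -1]                # last column: probability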
Example #13
def lightmap(nm, cm, light, ambient):
    width, height = nm.size
    cwidth, cheight = cm.size

    if cwidth != width or cheight != height:
        print("Resolutions do not match.")
        exit()

    shaded = Image.new('RGBA', (width, height), color=(0, 0, 0, 0))

    prog = ProgressBar(height)

    print("\nLight Map:")

    for y in range(0, height):
        prog.show(y)
        for x in range(0, width):
            (cr, cg, cb, ca) = cm.getpixel((x, y))
            (r, g, b, a) = nm.getpixel((x, y))
            if a != 0:
                normal = vmath.Vector3(
                    (r/255 - 0.5) * 2,
                    (g/255 - 0.5) * 2,
                    (b/255 - 0.5) * 2)
                angle = normal.angle(light, 'deg')
                illumination = 1 - (angle / 180)
                illumination = ambient + (1 - ambient) * illumination
                cr = int(illumination * cr)
                cg = int(illumination * cg)
                cb = int(illumination * cb)
                shaded.putpixel((x, y), (cr, cg, cb, ca))
    return shaded
Example #14
def spfilt( dataset, filt, io_blocksize = 1024, progress_bar = False ):
	"""
	Perform a spatial filter on an input two dimensional dataset.

	Returns dataset
	
	Keyword Arguments:
	dataset -- ndarray of size samples x channels
	
	filt -- ndarray spatial filter of channels x channels

	io_blocksize -- Number of samples to operate on at a time
		(Default: 1024)

	progress_bar -- If set to true, show a progress bar.
		(Default: False)
	
	TODO: Implement Parallelism
	"""
	start = 0
	nsamp = len( dataset )
	if progress_bar:
		from progress import ProgressBar
		pbar = ProgressBar( nsamp )

	while True:
		end_block = start + io_blocksize
		end = min( end_block, nsamp )  # slice end is exclusive; include the final sample
		dataset[ start:end, : ] = np.dot( dataset[ start:end, : ], filt )
		start = end

		if progress_bar: pbar.animate( start )
		if end_block >= nsamp: break

	return dataset
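For instance, a common average reference (CAR) is one such channels x channels spatial filter; a sketch of building and applying it (dataset is assumed to be a samples x channels ndarray):

import numpy as np

nchan = dataset.shape[1]
car = np.eye(nchan) - np.ones((nchan, nchan)) / nchan  # subtract the mean of all channels
dataset = spfilt(dataset, car, progress_bar=True)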
Example #15
    def learn(self, samples, epochs=25000, noise=0, test_samples=None, show_progress=True):
        ''' Learn given distribution using n data
                List of sample sets
                Number of epochs to be ran for each sample set
        '''

        # Check if samples is a list
        if type(samples) not in [tuple,list]:
            samples = (samples,)
            epochs = (epochs,)

        n = 0 # total number of epochs to be ran
        
        for j in range(len(samples)):
            n += epochs[j]
        
        self.entropy = []
        self.distortion = []

        if show_progress:
            bar = ProgressBar(widgets=[Percentage(), Bar()], maxval=n).start()
        index = 0

        for j in range(len(samples)):
            
            self.samples = samples[j]
            I = np.random.randint(0,self.samples.shape[0],n)
            
            for i in range(epochs[j]):
                # Set sigma and learning rate via time
                t = index/float(n)
                lrate = self.lrate_i*(self.lrate_f/self.lrate_i)**t
                sigma = self.sigma_i*(self.sigma_f/self.sigma_i)**t
                C = self.adj.copy()

                # Learn something
                S = self.samples[I[i]] + noise*(2*np.random.random(len(self.samples[I[i]]))-1)
                S = np.minimum(np.maximum(S,0),1)
                self.learn_data(S,lrate,sigma)

                #self.learn_data(self.samples[I[i]],lrate,sigma)
                if i%100 == 0:
                    self.entropy.append(((self.adj-C)**2).sum())

                    if test_samples is not None:
                        distortion = self.compute_distortion(test_samples)
                    else:
                        distortion = self.compute_distortion(self.samples)

                    self.distortion.append(distortion)

                if show_progress:
                    bar.update(index+1)

                index = index+1

        if show_progress: bar.finish()
Example #17
async def queue_tasks(
    tasks: AsyncIterable[tuple[AuditTask, LogQueue]],
    task_q: asyncio.Queue[tuple[AuditTask, LogQueue]],
    progress: ProgressBar,
) -> None:
    async for task, logqueue in tasks:
        task.set_start_stage_cb(progress.enter_stage)
        task.set_finish_stage_cb(progress.exit_stage)
        await task_q.put((task, logqueue))
        progress.task_enqueued()
Example #18
def dropsubj_run():
    pb = ProgressBar(len(PERCENT_REMOVE))
    for p in PERCENT_REMOVE:
        this_rand = lambda d: replace_percent_subjects(d, p)
        cleaner = lambda d: remove_percent_deviant_subjects(d, p)
        params = {'p': p, 'cleaner': 'devsubj'}

        for row in run_experiment(this_rand, [cleaner], [params]):
            yield row
            pb.incr_and_errput()
Example #19
 def __init__(self, filename):
     dfile = filename.split('.')[0] + '.pkl'
     try:
         dump = self.loadfile(dfile)
         self.bag_of_word = dump['bag_of_word']
         self.bag_of_index = dump['bag_of_index']
         self.markov_matrix = np.array(dump['markov_matrix'])
     except OSError as e:
         self.bag_of_word = {}
         self.bag_of_index = {}
         self.markov_matrix = np.array([[0]])
         index = 0
         try:
             with open(filename, 'r') as file:
                 print(
                     "log: this would show up if its a new file. A markov chain will be created and saved"
                 )
                 low = (' '.join(file.read().splitlines())).split(' ')
                 progress = ProgressBar(len(low) - 1, fmt=ProgressBar.FULL)
                 for i in range(progress.total):
                     progress.current += 1
                     progress()
                     if self.bag_of_word.setdefault(low[i], index) == index:
                         self.bag_of_index[index] = low[i]
                         self.markov_matrix = np.pad(self.markov_matrix,
                                                     [(0, self.max(index)),
                                                      (0, self.max(index))],
                                                     mode='constant')
                         index += 1
                     if self.bag_of_word.setdefault(low[i + 1],
                                                    index) == index:
                         self.bag_of_index[index] = low[i + 1]
                         self.markov_matrix = np.pad(self.markov_matrix,
                                                     [(0, self.max(index)),
                                                      (0, self.max(index))],
                                                     mode='constant')
                         index += 1
                     self.markov_matrix[self.bag_of_word[low[i]]][
                         self.bag_of_word[low[i + 1]]] += 1
                 progress.done()
             s = np.sum(self.markov_matrix, axis=1)[:, np.newaxis]
             s[s == 0] = 1
             self.markov_matrix = self.markov_matrix / s
             self.markov_matrix[self.markov_matrix.shape[0] - 1][0] = 1
             dump = {}
             dump['bag_of_word'] = self.bag_of_word
             dump['bag_of_index'] = self.bag_of_index
             dump['markov_matrix'] = self.markov_matrix
             self.savefile(dump, dfile)
             del dump
             print("log: chain for", filename, "is created")
         except OSError as e:
             print("file not found !")
             exit()
Example #20
 def __init__(self, url, gitlab, includes=[], excludes=[], concurrency=1, in_file=None, method="http"):
     self.in_file = in_file
     self.method = method
     self.concurrency = concurrency
     self.excludes = excludes
     self.includes = includes
     self.url = url
     self.gitlab = gitlab
     self.root = Node("", root_path="", url=url)
     self.disable_progress = False
     self.progress = ProgressBar('* loading tree', self.disable_progress)
Example #21
def minrho_run():
    pb = ProgressBar(len(NUM_DROP))
    sys.stderr.write("Beginning minrho eval.\n")
    pb.errput()

    for n in NUM_DROP:
        this_rand = lambda d: replace_subjects(d, n)
        cleaner = lambda d: remove_most_deviant_subjects(d, n)
        params = {'n': n, 'cleaner': 'minrho'}

        for row in run_experiment(this_rand, [cleaner], [params]):
            yield row
            pb.incr_and_errput()
Example #22
def convert_bcistream( dat, h5filename = None, overwrite = False ):

	# If we don't have a filename for the resulting hdf5 file, we will
	# just make an hdf5 file from the current filename in the same directory
	if h5filename is None:
		filepath, h5filename = os.path.split( dat.filename )
		h5filename = os.path.splitext( h5filename )[0]
		h5filename = os.path.join( filepath, h5filename + '.hdf5' )

	if os.path.isfile( h5filename ) and not overwrite:
		print "Error: %s exists already.  Not overwriting." % h5filename
		return None

	# Create the required group and set group attributes
	# NOTE: BCI2000 file format has no good record of experiment
	outfile = h5.File( h5filename, 'w' )
	group = H5EEGGroup.create( outfile, 
		subject = dat.params[ 'SubjectName' ],
		timestamp = dat.datestamp )

	# Create the EEG, AUX, and Event datasets
	eeg_labels = [ str( i + 1 ) for i in range( dat.nchan ) ]
	if 'ChannelNames' in dat.params.keys(): eeg_labels = dat.params[ 'ChannelNames' ]
	eeg_offsets = dat.offsets.astype( 'int32' )
	H5EEGDataset.create( group, dat.samples(), eeg_labels, eeg_offsets, dat.gains, 
		name = 'raw', rate = dat.samplingrate(), bytes_per_sample = dat.bytesperchannel )
	aux_labels = dat.statedefs.keys()
	H5EEGAuxDataset.create( group, dat.samples(), labels = aux_labels, 
		rate = dat.samplingrate() )
	H5EEGEvents.create( group )

	# Read the data into the h5f file in blocks of 1 second each
	dat.seek( 0 )
	eeg_dset = group.eeg().dataset
	aux_dset = group.aux().dataset
	print 'Converting %s to %s...' % ( dat.filename, h5filename )

	pbar = ProgressBar( dat.samples() )
	while dat.tell() != dat.samples():
		samp_idx = dat.tell()
		read_block = int( dat.samplingrate() )
		signal, states = dat.decode( nsamp = read_block, apply_gains = False )
		read_block = signal.shape[1]

		eeg_dset[ samp_idx:( samp_idx + read_block ), : ] = signal.T
		for idx, label in enumerate( aux_labels ):
			aux_dset[ samp_idx:( samp_idx + read_block ), idx ] = np.squeeze( states[ label ] )

		pbar.animate( dat.tell() )

	return outfile
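A usage sketch; the bcistream reader import and the file name are assumptions:

from BCPy2000.FileReader import bcistream

dat = bcistream('session01.dat')
h5file = convert_bcistream(dat, overwrite=True)
if h5file is not None:
    h5file.close()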
Example #23
def zscore_run(randomizer, randomizer_name):
    pb = ProgressBar(len(ZSCORES) * len(NOISES))
    sys.stderr.write("Beginning zscore eval with %s randomization.\n" % randomizer_name)
    pb.errput()
    for percent_noise in NOISES:
        this_rand = lambda d: randomizer(d, percent_noise)

        cleaners = [RemoveDeviantRatings(zscore).scores if zscore else BaselineCleaner().scores
                    for zscore in ZSCORES]
        parameters = [dict(cleaner='zscore', p=percent_noise, randomizer=randomizer_name, zscore=str(zscore))
                      for zscore in ZSCORES]

        for row in run_experiment(this_rand, cleaners, parameters):
            yield row
            pb.incr_and_errput()
Example #24
def cmd_dump(logger, args):
    """
    Dump recorded values into a file or to stdout.
    """
    if logger.is_busy():
        print("Logger is currently recording.")
        return 1
    config = logger.configuration()
    if config["count"] == 0:
        print("No records available (nothing has been logged).")
        return 0
    output = _open_output(args, config)
    progress = None if args.no_progress else ProgressBar(config['count'])
    if args.data_format:
        data_format = args.data_format
    else:
        data_format = "%c;%d;%t"
        if config["humidity"]:
            data_format += ";%h"
    counter = 0
    for values in logger:
        for value in values:
            stamp, temp, hum = value
            record = _format_record(data_format, counter,
                                    stamp.strftime(args.time_format), temp,
                                    hum if config["humidity"] else None)
            print(record, file=output)
            counter += 1
        if progress is not None:
            progress += len(values)
    if progress is not None:
        print()
    return 0
Example #25
def trim_samples(samples,nsamp,pbar=False):

    weights = np.array([s.w for s in samples])
    weights /= max(weights)
    neff = np.sum(weights)
    n    = weights.size 

    print "effective number of samples: " , neff, "/", n

    # Now trim off the ones that are too small
    ntarget = sum([ w if w<1.0/n else 1 for w in weights]) + 0.0

    if nsamp>0 and nsamp<ntarget:
        weights *= nsamp/neff
    else:
        weights *= n

    if pbar: progress_bar = ProgressBar(samples.size,message="trimming samples ")
    else: print "trimming samples"
    trimmed_samples = []
    for w,s in zip(weights,samples):
        if rand() < w:
            s.w = max(1.0,w)
            trimmed_samples.append(s)

        if pbar: progress_bar()

    trimmed_samples = np.array(trimmed_samples)
    print "Samples trimmed from " , n, " to ", trimmed_samples.size

    return trimmed_samples
Example #26
def to_png(tif):
    width, height = tif.size

    img = Image.new('RGBA', (width, height), color=(0, 0, 0, 0))

    prog = ProgressBar(height - 1)

    print("\nTo PNG:")
    for y in range(0, height - 1):
        prog.show(y)
        for x in range(0, width - 1):
            value = tif.getpixel((x, y))
            setpixel(x, y, value, img)

    print("\n")
    return img
Example #27
    def _align(self, Ys, X, sel=None, record=True):

        pb = ProgressBar(final=len(Ys), label='align', tail_label='{i:}')
        if record:
            info = np.zeros((len(Ys), 11))

        # set defaults
        sel = sel or slice(None)

        X_sel = X[:, sel]
        t0 = self._calc_centroid(X_sel)
        X0 = X - t0  # centre reference on origin
        X0_sel = X0[:, sel]

        for i, Y_ in enumerate(Ys):
            pb.update()
            Y_sel = Y_[:, sel]
            t = self._calc_centroid(Y_sel)
            Y0_ = Y_ - t  # centre frame on origin
            Y0_sel = Y0_[:, sel]
            R = self._calc_optrot(X0_sel, Y0_sel)  # get rotation matrix
            Y0 = np.dot(R, Y0_.T)  # rotate
            Y = Y0 + t0  # centre frame on t0
            Ys[i, :, :] = Y

            if record:
                # ref/frame comparison before alignment
                info[i, 0] = self._calc_rmsd(X, Y_)  # all
                info[i, 2] = self._calc_rmsd(X0, Y0_)  # rotational
                info[i, 3] = self._calc_rmsd(X0_sel,
                                             Y0_sel)  # rotational selection

                # ref/frame comparison after alignment
                info[i, 4] = self._calc_rmsd(X, Y)  # all
                info[i, 5] = self._calc_rmsd(X0, Y0)  # rotational

                # aligned/unaligned frame comparisons
                info[i, 6] = self._calc_rmsd(Y_, Y)  # all
                info[i, 7] = self._calc_rmsd(Y0, Y0_)  # rotational

                # ref/frame and aligned/unaligned
                info[i, 8] = self._calc_rmsd(t, t0)  # centre difference

                # euler angles
                info[i, 8:11] = self._calc_euler(R)

        return (Ys, info) if record else Ys
Example #28
async def task_runner(task_q: asyncio.Queue[tuple[AuditTask, LogQueue]],
                      result_aggr: ResultAggregator,
                      progress: ProgressBar) -> None:
    await asyncio.sleep(
        random.random()
    )  # Random jitter at the start so that not all requests get fired immediately
    while True:
        task, logqueue = await task_q.get()
        progress.task_running()
        progress.enter_stage(CheckStage.preprocess)
        await task.audit_path.mkdir(exist_ok=True, parents=True)
        progress.exit_stage(CheckStage.preprocess)
        await task.run(result_aggr)
        progress.enter_stage(CheckStage.postprocess)
        await logqueue.flush()
        progress.exit_stage(CheckStage.postprocess)
        task_q.task_done()
Example #29
def background_thread():
    """Example of how to send server generated events to clients."""
    while True:
        socketio.sleep(1)
        ret = []
        while not SocketQueue.res_queue.empty():
            ProgressBar.now_length += 1
            ret.append(SocketQueue.res_queue.get())
        if ret:
            socketio.emit('my_response', {
                'data': ret,
                'width': str(ProgressBar.calculate()) + '%'
            })
Example #30
    def __init__(self, pdb, dmin, dmax, expand=True):

        # inherit from Mixin
        self.enumerate_hkl = self._enumerate_hkl_numpy

        # enable access to self.F[h, k, l] with slicing support
        self.F = self._make_FhklArray()

        # coordinates
        if expand:
            pdb = pdb.get_unitcell()

        uc_xyzc = np.dot(pdb.S, [pdb.x, pdb.y, pdb.z])
        self.n = len(pdb)

        # reflections
        hkls, d = self._enumerate_hkl_numpy(dmin, dmax, S=pdb.S)
        stol2 = 1.0 / (4.0 * d * d)

        # Cromer-Mann scattering factors
        assert os.path.isfile('cm.pkl')
        with open('cm.pkl', 'rb') as _f:
            _A, _B, _C = pickle.load(_f)
        f0 = {
            e:
            _C[e] + sum(_A[e][i] * np.exp(-_B[e][i] * stol2) for i in range(4))
            for e in set(pdb.e)
        }

        # direct summation for each summand
        self.Fatoms = np.zeros((len(pdb), len(hkls)), dtype=complex)
        p = ProgressBar(final=len(pdb),
                        label='directsum',
                        tail_label='{f:>10}')

        for i, (xyzc, e, n, b) in enumerate(zip(uc_xyzc.T, pdb.e, pdb.n,
                                                pdb.B)):
            p.update()
            hx = np.sum(hkls * xyzc, axis=1)
            self.Fatoms[i, :] = n * f0[e] * np.exp(-b * stol2) * np.exp(
                np.pi * 2j * hx)

        # save drange
        self._hkls = hkls
        self._d = d
        self.selection()
Example #31
def font_file_cracker():
    """
    接受字体文件,返回破解结果
    :return:
    """
    file_suffix = None
    try:
        file = request.files.get('font_file')
        type_ = request.form.get('type')
    except Exception as _e:
        return jsonify({'code': 400, 'msg': f'lose args,{_e}', 'res': {}})

    filename = re.sub('[(()) ]', '', file.filename)
    file.save('./font_collection/' + filename)

    if config.is_online and not check_file('./font_collection/' + filename):
        return jsonify({'code': 300, 'msg': 'Please use example file(*^_^*)'})

    ProgressBar.init()

    try:
        res = ocr_processor(filename, request.remote_addr, has_pic_detail=True)

        if type_ == 'html':
            font_dict = {}
            for idx, foo in enumerate(res):
                if foo['ocr_result']:
                    res[idx]['ocr_result'] = foo['ocr_result'][0]['simPred']
                    font_dict[foo['name']] = foo['ocr_result']
                else:
                    res[idx]['ocr_result'] = 'undefined'
                    font_dict[foo['name']] = 'undefined'

            return jsonify({
                'code': 200,
                'html': render_template('images.html', result=res),
                'font_dict': font_dict
            })
        else:
            return jsonify({'code': 200, 'msg': 'success', 'res': res})

    except Exception as _e:
        if file_suffix:
            shutil.rmtree('./fontforge_output/' + file_suffix)
        return jsonify({'code': 400, 'msg': f'{_e}', 'res': {}})
Example #32
def svd_run(randomizer, randomizer_name):
    pb = ProgressBar((K + 1) * len(NOISES))
    sys.stderr.write("Beginning SVD eval with %s randomization.\n" % randomizer_name)
    pb.errput()
    for percent_noise in NOISES:
        this_rand = lambda d: randomizer(d, percent_noise)

        parameters = [{
            'cleaner': 'svd',
            'p_noise': percent_noise,
            'randomizer': randomizer_name,
            'k': str(k)
        } for k in [None] + range(1, K + 1)]
        cleaners = [BaselineCleaner().scores] + [c.scores for c in create_svd_cleaners(K)]

        for row in run_experiment(this_rand, cleaners, parameters):
            yield row
            pb.incr_and_errput()
Example #33
def compute_slices(fsamples,xs,pbar=False):
    if pbar: progress_bar = ProgressBar(fsamples.size,message="computing slices ")
    else: print "computing slices"
    slices = []

    for f in fsamples:
        slices.append(f(xs))
        if pbar: progress_bar()
                     
    return np.array(slices).T                   # return transpose
Example #34
def compute_kernels(slices,weights,pbar=False):
    if pbar: progress_bar = ProgressBar(slices.size,message="computing kernels")
    else: print "computing kernels"
    kernels = []

    for s in slices:
        kernels.append(gaussian_kde(s,weights=weights))
        if pbar: progress_bar()
                     
    return np.array(kernels)
Example #35
def rebuild_random(magic, data):
    """Create a random.Random() object from the data and the magic vectors

    :param list[str] magic: magic vector data
    :param str data: observed output from Mersenne Twister
    :rtype: random.Random
    """
    progress = ProgressBar()
    data_vals = [ord(d) for d in data]
    state = [0L for _ in xrange(N)]
Example #36
def compute_masses(kernels,y,pbar=False):

    if pbar: progress_bar = ProgressBar(kernels.size,message="computing masses ")
    else: print "computing masses"
    masses = []

    for k in kernels:
        masses.append( compute_pmf(y,k) )         # compute M(x,y) for each value
        if pbar: progress_bar()

    return np.array(masses).T             # return the transpose
Example #37
def download_images(uri_list, download_location, retain_original_naming=True):
    num = len(uri_list)
    progress = ProgressBar(num, fmt=ProgressBar.FULL)
    for i, src in enumerate(uri_list):
        ############################### DO WORK HERE ###########################
        try:
            img_data = urlopen(src).read()
            if len(img_data) > 0:  #Read Success
                filename = basename(urlsplit(src)[2]).strip()
                if not retain_original_naming:
                    filetype = filename.split('.')[-1]
                    filename = str(i + 1) + '.' + filetype
                output = open(os.path.join(download_location, filename), 'wb')
                output.write(img_data)
                output.close()
        except Exception as e:
            log_error(e)
        ############################### END OF WORK ############################
        progress.current += 1
        progress()
        sleep(0.001)
    progress.done()
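A usage sketch with placeholder URLs and target directory:

urls = [
    'http://example.com/images/cat.jpg',
    'http://example.com/images/dog.png',
]
download_images(urls, '/tmp/images', retain_original_naming=False)
# saved as 1.jpg, 2.png, ... inside /tmp/images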
Example #38
def normalmap(hm, sobelscale):
    width, height = hm.size

    nm = Image.new('RGBA', (width, height), color=(0, 0, 0, 0))

    prog = ProgressBar(height)

    print("\nNormal Map:")

    for y in range(0, height):
        prog.show(y)
        for x in range(0, width):
            value = hm.getpixel((x, y))
            if value != -2147483648:
                normal = sobel(x, y, hm, sobelscale)
                color = (
                    int(normal.x * 255),
                    int(normal.y * 255),
                    int(normal.z * 255), 255)
                nm.putpixel((x, y), color)

    return nm
Example #40
def listen( data, rate ):
	"""
	Listen to a one-dimensional numpy array

	Keyword Arguments:
	data -- A one-dimensional numpy array
	rate -- The data sampling rate.  This has nothing to do with the sampling 
		rate of the audio output from your speakers.

	Depends on pyaudio
	"""
	import pyaudio
	p = pyaudio.PyAudio()
	stream = p.open( format = pyaudio.paFloat32, channels = 1, 
		rate = int( rate ), output = True )

	data = np.squeeze( data )
	data = data / max( abs( max( data ) ), abs( min( data ) ) )
	
	start = 0
	rate = int( rate )
	read_block = rate / 4 # Read in 0.25 sec increments
	audio_length = len( data ) / float( rate )
	pbar = ProgressBar( audio_length )
	print 'Listening to data (%f sec)...  Interrupt (^C) to stop.' % audio_length
	try:
		while start != len( data ):
			end = min( len( data ), start + read_block )
			pbar.animate( start / float( rate ) )
			stream.write( data[ start:end ].astype( 'float32' ).tostring() )
			start = end
	except KeyboardInterrupt:
		pass

	stream.stop_stream()
	stream.close()

	p.terminate()
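A sketch that plays one second of a 440 Hz tone (pyaudio must be installed):

import numpy as np

rate = 44100
t = np.arange(rate) / float(rate)
listen(np.sin(2 * np.pi * 440.0 * t), rate)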
Example #42
mappings = pd.read_csv(sys.argv[1], sep="\t")

synsets = mappings.Synset[mappings.Synset.notnull()]
synsets = [y for x in synsets.map(lambda z: z.split()) for y in x]

def fetch_image_urls(synset):
    data = fetch.fetch_data(MAPPING_URL % synset)
    image_mappings = [y.split() for y in data.split("\r\n") if y]
    return image_mappings

def fetch_hypos(synset):
    data = fetch.fetch_data(HYPO_URL % synset)
    return data.replace("-", "").split("\r\n")


pb = ProgressBar(len(synsets))
pb.errput()
for synset in synsets:
    image_urls = fetch_image_urls(synset)
    if len(image_urls) == 0:
        children_synsets = fetch_hypos(synset)
        children_urls = [fetch_image_urls(cs) for cs in children_synsets]
        image_urls = [y for x in children_urls for y in x]

    for imgid, url in image_urls:
        print "%s\t%s\t%s" % (synset, imgid, url)

    pb.incr_and_errput()


Example #43
	# Check that an IP/Hostname was sent
	usage()
	# Load Configuration
	con = Con('ports.ini')

	targetIP = sys.argv[1]

	ipparse = IPParse(targetIP)
	ips = ipparse.parse()
	ipmatch = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
	for ip in ips:
		if (ipmatch.match(ip) == None):
			ip = gethostbyname(ip)
		# Check if the host is up first
		if (is_host_up(ip) == False):
			print "DOWN: %s is down" % ip
			continue

		print 'Starting scan on host: ', ip
		p = ProgressBar(1024)
		p.update_time(10)
		open_ports = {}
		for i in range(10, 1024):
			p.update_time(i)
			if(scan_port(ip, i)):
				# right justify for sorting
				open_ports[str(i).rjust(5, '0')] = con.get(i)

		for port in sorted(open_ports.iterkeys()):
			print "Port (%s): %s" % (str(int(port)).rjust(5, ' '), open_ports[port])
Example #44
def spectrogram( lts, channel, events, length = None, baseline_length = None,
	baseline_offset = 0, nfft = 1024, shift = 10, progress_bar = False ):
	"""
	Generate a spectrogram based on a query into the dataset.  Internally, this uses
	matplotlib.mlab.specgram, so this is a FFT-based spectrogram.

	       <---------------------------------||||||||||||||||||||------->
	data = [baseline_length][baseline_offset]| event[start_idx] |[length]
	       <---------------------------------||||||||||||||||||||------->

	Returns: ( avg_spec, f, t )
	avg_spec = Average spectrogram array
	f = frequency labels for axis 0 of avg_spec
	t = time labels for axis 1 of avg_spec

	Keyword Arguments:
	lts -- LabeledTimeseries object to use as the input dataset.

	events -- An ndarray of event objects to generate spectrograms for.
		Hint: use H5EEGEvents.query_events(...) for this!

	channel -- The channel within lts to create the spectrogram from.

	length -- Length of the spectrogram in samples.  This value overrides event
		duration and is used to define spectrogram length for flag events.
		(Default: None -- length defined by event duration)

	baseline_length -- The number of samples of 'baseline' period to collect before
		each event.  This data segment will be transformed into a spectrogram from
		which a baseline distribution will be defined per-frequency bin for Z-scoring.
		NOTE: Set baseline_length = 0 to disable baseline referencing.
		(Default: None -- baseline_length = length.  If length == none, use event duration)

	baseline_offset -- The number of samples to offset before the event start_idx
		before the baseline period ends.  See diagram above for clarification.
		(Default: 0 -- No samples of baseline offset)

	nfft -- Length of fft window.  Should be a power of two.  Used for both baseline
		and spectrogram. Should be shorter than length and baseline_length.
		(Default: 1024)

	shift -- Time shift of fft windows in samples.  overlap = nfft - shift.
		(Default: 10)

	progress_bar -- Show a progress bar.
		(Default: False)

	FIXME: Specify lengths in seconds, because we have a samplingrate.
	TODO: Parallelism
	"""
	overlap = nfft - shift
	if baseline_length is None: baseline_length = length

	# Calculate a spectrogram for every event matching the query
	avg_spec = None
	num_spectrograms = 0.0
	if progress_bar:
		from progress import ProgressBar
		pbar = ProgressBar( len( events ) )

	for idx, event in enumerate( events ):

		# Acquire information about the event
		name = event['name']
		start_idx = event['start_idx']
		duration = event['duration']

		# Skip the event if we can't find a good length for it.
		if duration == 0: duration = length
		if duration is None: continue

		# Acquire the data for this spectrogram and calculate the spectrogram.
		data = lts[ start_idx:start_idx + duration, channel ]
		spec, f, t = specgram( data, NFFT = nfft, Fs = lts.get_rate(), noverlap = overlap )
		spec = 20.0 * np.log10( spec )

		# Reference to baseline if we can
		if baseline_length != 0:

			# Acquire the baseline data
			if baseline_length is None: b_len = duration
			else: b_len = baseline_length
			baseline_start = start_idx - b_len - baseline_offset
			baseline_end = start_idx - baseline_offset
			baseline_data = lts[ baseline_start:baseline_end, channel ]

			# Calculate the baseline spectrogram and determine distribution
			baseline_spec, f_base, t_base = specgram( baseline_data, NFFT = nfft,
				Fs = lts.get_rate(), noverlap = overlap )
			baseline_spec = 20.0 * np.log10( baseline_spec )
			mu = np.mean( baseline_spec, axis = 1 )
			sigma = np.std( baseline_spec, axis = 1 )

			# Z-Score the spectrogram by the baseline distribution.
			def zscore( data, mu, sigma ):
				return np.divide( np.subtract( data, mu ), sigma )
			spec = np.apply_along_axis( zscore, 0, spec, mu, sigma )

		# Add the spectrogram to the list of spectrograms
		if avg_spec is None: avg_spec = spec
		else: avg_spec = np.add( avg_spec, spec )
		num_spectrograms += 1.0

		# Update the progress
		if progress_bar: pbar.animate( idx )

	# Sanity Check
	if num_spectrograms == 0.0:
		raise Warning( "No spectrograms generated -- Did you forget to specify Length?" )

	# Average the plot and return the calculated parameters
	np.divide( avg_spec, num_spectrograms, out = avg_spec )
	return avg_spec, f, t
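A hedged usage sketch; lts, the events query, and the channel name are assumptions:

events = group.events().query_events('StimulusOn')
avg_spec, f, t = spectrogram(lts, 'C3', events,
    baseline_length=int(lts.get_rate()), nfft=512, shift=16,
    progress_bar=True)
# avg_spec is indexed [frequency, time]; f and t label the two axes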
Example #45
def frequency_feature( lts, events, length = None, band = ( 70, 110 ), smooth = 10,
	baseline_length = None, baseline_offset = 0, baseline_features = False,
	progress_bar = False, exclude_ch = [] ):
	"""
	Generate average frequency features from a LabeledTimeseries across events.

	       <---------------------------------||||||||||||||||||||------->
	data = [baseline_length][baseline_offset]| event[start_idx] |[length]
	       <---------------------------------||||||||||||||||||||------->

	Returns: ( features, obs_labels, ch_labels, t_labels )
		features -- ndarray of frequency features ( obs (events) x ch x samples (time) )
		obs_labels -- labels of the observation axis of features
		ch_labels -- labels of the ch axis of features.  Equal to lts.get_labels()
		t_labels -- time labels in seconds.

	Keyword Arguments:
	lts -- LabeledTimeseries object to use as the input dataset.

	events -- An ndarray of event objects to generate features for.
		Hint: use H5EEGEvents.query_events(...) for this!

	length -- Length of the feature in seconds.  This value overrides event
		duration and is used to define feature length for flag events.
		(Default: None -- length defined by event duration)

	band -- Frequency band in Hz defined by a tuple ( cuton, cutoff ).  Data will be
		filtered by an FIR bandpass filter and hilbert-transformed to extract the power
		in that band.
		(Default: ( 70, 110 ) High Gamma

	smooth -- The power envelope is low-pass filtered to this frequency.  Used to
		smooth feature output.  Set to 0 for no additional smoothing
		(Default: 10 Hz)

	baseline_length -- The number of samples of 'baseline' period to collect before
		each event.  This data segment will be have features extracted from
		which a baseline distribution will be defined per-electrode for Z-scoring.
		NOTE: Set baseline_length = 0 to disable baseline referencing.
		(Default: None -- baseline_length = length)

	baseline_offset -- The number of samples to offset before the event start_idx
		before the baseline period ends.  See diagram above for clarification.
		(Default: 0 -- No samples of baseline offset)

	baseline_features -- Append features for the baseline periods rather than 
		referencing/zscoring the features by baseline.
		NOTE: This will double the number of features/observations
		(Default: False -- Do not append baseline features)

	progress_bar -- Show a progress bar.
		(Default: False)

	Depends on BCPy2000 hilbert.
	FIXME: Remove Dependency
	TODO: Parallelism
	"""
	import BCPy2000.Paths
	from BCPy2000.SigTools.Basic import hilbert

	# Take sampling rate into account
	if length is not None: length = int( length * lts.get_rate() )
	if baseline_length is None: baseline_length = length
	if baseline_length is None: baseline_length = 0
	buffer_samples = int( 0.2 * lts.get_rate() )

	# Calculate filter coefficients
	smooth = smooth / float( lts.get_rate() )
	smooth_coefs = None
	if smooth != 0.0:
		smooth_coefs = firwin( 64, cutoff = smooth, window = "hamming" )
		smooth_coefs = smooth_coefs / sum( smooth_coefs ) # Correct Gain

	# Determine channel indices
	def bad_ch( ch ):
		for q in exclude_ch: 
			if q in ch: return True
		return False
	channels = np.array( [ ch for ch in lts.get_labels() if not bad_ch( ch ) ] )

	features = []
	obs_labels = []

	def add_observation( name, feature ):
		if smooth_coefs is not None:
			feature = filtfilt( smooth_coefs, [1.0], feature, axis = 0 )
		features.append( feature.T )
		obs_labels.append( name )

	# Calculate features for every event matching the query
	if progress_bar:
		from progress import ProgressBar
		pbar = ProgressBar( len( events ) )
	for idx, event in enumerate( events ):

		# Acquire information about the event
		name = event['name']
		start_idx = event['start_idx']
		duration = event['duration']

		# Skip the event if we can't find a good length for it.
		# FIXME: Logic is off here.
		if duration == 0: duration = length
		if duration is None: continue

		# Define start and end of feature extraction epoch
		end = start_idx + duration + buffer_samples
		start = start_idx - buffer_samples

		# Acquire the data for this spectrogram and calculate the spectrogram.
		observation = lts[ start:end, channels ]
		obs_features = hilbert( observation, band = band, return_dict = True,
			samplingfreq_hz = lts.get_rate() )[ 'amplitude' ]

		# Reference to baseline if we can
		if baseline_length != 0:

			# Acquire the baseline data
			baseline_start = start_idx - baseline_length - baseline_offset - buffer_samples
			baseline_end = start_idx - baseline_offset + buffer_samples
			baseline_data = lts[ baseline_start:baseline_end, channels ]

			# Calculate the baseline features and determine distribution
			obs_baseline = hilbert( baseline_data, band = band, return_dict = True,
				samplingfreq_hz = lts.get_rate() )[ 'amplitude' ]
			obs_baseline = obs_baseline[ buffer_samples:-buffer_samples, : ]

			if baseline_features == False:
				# Z-Score the features by the baseline distribution.
				# TODO: This isn't statistically sound, this assumes 
				# independence in the time-series
				mu = np.mean( obs_baseline, axis = 0 )
				sigma = np.std( obs_baseline, axis = 0 )
				def zscore( data, mu, sigma ):
					return np.divide( np.subtract( data, mu ), sigma )
				obs_features = np.apply_along_axis( zscore, 1, obs_features, mu, sigma )
			else: add_observation( 'BASELINE_' + name, obs_baseline )

		# Add the features to the list of features
		obs_features = obs_features[ buffer_samples:-buffer_samples, : ]
		add_observation( name, obs_features )

		# Update the progress
		if progress_bar: pbar.animate( idx )

	# Return the feature array
	features = np.array( features )
	t_labels = np.arange( features.shape[-1] ) / float( lts.get_rate() )
	return ( features, obs_labels, channels, t_labels )
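A companion sketch extracting high-gamma features for the same kind of events query; lts and events are assumptions, and note that length here is given in seconds:

features, obs_labels, ch_labels, t_labels = frequency_feature(
    lts, events, length=2.0, band=(70, 110), smooth=10,
    baseline_length=0, progress_bar=True)
# features is observations (events) x channels x samples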
Example #46
from progress import ProgressBar
from time import sleep
from random import random

my_list = range(20)

print 'TEST: Using a default ProgressBar'
sleep(1.0)
pb = ProgressBar(len(my_list), 20)
pb.start()
for i in my_list:
    sleep(0.3)
    pb.tick()
pb.finish()

print 'TEST: Using a custom design'
sleep(1.0)
pb = ProgressBar(len(my_list), 20, bookends='<{}>', 
        bar_char='/', empty_char='-')
pb.start()
for i in my_list:
    sleep(0.3)
    pb.tick()
pb.finish()

print 'TEST: Using a minimal design'
sleep(1.0)
pb = ProgressBar(len(my_list), 20, bookends='', 
        bar_char='-', empty_char=' ', show_percent=True, show_iter=False,
        show_time=False)
pb.start()
for i in my_list:
    sleep(0.3)
    pb.tick()
pb.finish()
Example #47
    v = np.random.uniform(low=0, high=1, size=N//2)
    theta = 2*np.pi*u
    phi = np.arccos(2*v-1)
    spheres[:N//2,0] = rho*np.cos(theta)*np.sin(phi)+.5
    spheres[:N//2,1] = rho*np.sin(theta)*np.sin(phi)+.0
    spheres[:N//2,2] = rho*np.cos(phi)+.5
    spheres[N//2:,0] = rho*np.cos(theta)*np.sin(phi)+.5
    spheres[N//2:,1] = rho*np.sin(theta)*np.sin(phi)+1.0
    spheres[N//2:,2] = rho*np.cos(phi)+.5 


    samples = sphere
    np.random.seed(123)
    net = DSOM((n,n,3), elasticity=1.0, init_method='fixed')
    I = np.random.randint(0,samples.shape[0], epochs)
    bar = ProgressBar(widgets=[Percentage(), Bar()], maxval=epochs).start()
    plotfile = '/tmp/plot.txt'
    datafile = '/tmp/data.txt'
    rot_x, rot_z = 65,225


    for i in range(epochs):
        if i == (epochs//2):
            samples = spheres
            I = np.random.randint(0,samples.shape[0], epochs)

        if i%5 == 0:
            rot_x = 20+(1+np.cos(i/float(epochs)*4*np.pi))*45
            rot_z = (rot_z+1) % 360
            filename = '/tmp/image-%05d' % i
            file = open(plotfile, 'w')
print "parsing file '%s'" % sopts.inputfile
neighbor = ""
lc = 0

# open input+output file
fh = open(sopts.inputfile, 'r')
fhout = open(sopts.outputfile, 'w')	# rewrite
fhout.write("timestamp,neighbor,capabilities,mrr_T,mrr_t,mrr_P,rate,throughput,ewma_prob,this_prob,this_succ,this_attempt,success,attempts\n")
linebuffer = []
headers = False
t0 = time()
print "getting length of file... ",
numlines = get_num_lines(sopts.inputfile)
if (numlines > -1):
	print "%d lines" % numlines
	progress = ProgressBar(50)
	progress.show(0)
else:
	print "failed"

while (main_loop):
	try:
		filepos = fh.tell()	# remember position before reading
		line = fh.readline()
		if ((not "\n" in line) and (line != '')):
			fh.seek(filepos)
		else:
			if (line != ''):
				lc += 1
				if (lc % 10000 == 0 and numlines > -1):
					duration = (time() - t0)
Example #49
def main(plistpath, dest, options=None):
    if options:
        flat = options.flat
        delete = options.delete
        pretend = options.pretend

    plist = open(plistpath)
    srcset = set()
    destset = set()

    p.message("Parsing playlist.", 1)

    playlistpaths = []

    for line in plist:
        if line[0] == '#':
            continue

        if line[-1] == '\n':
            path = line[:-1] # strip the new line
        else:
            path = line

        playlistpaths.append(path)

    if len(playlistpaths) == 0:
        p.message("Playlist is empty!", 1)
        sys.exit()

    p.message("Loading metadata from playlist files.", 1)
    if p.level > 1 and p.level < 4:
        bar = ProgressBar(len(playlistpaths),"numbers")
        bar.draw()
    i = 0
    totalbytes=0

    for path in playlistpaths:
        try:
            song = getMetaData(path)
            totalbytes += os.path.getsize(path)

            srcset.add(song)
        except (OSError, IOError) as e:
            p.message("\nError loading {0}: {1}".format(path, e.strerror), 2)
        i += 1
        if p.level > 1 and p.level < 4:
            bar.update(i)

    p.message("{0} files, {1} MB".format(len(playlistpaths), totalbytes/(1024.0*1024.0)), 2)

    p.message("Loading existing files", 1)
    existingFilePaths = []

    for dirpath, dirnames, filenames in os.walk(dest):
        for path in filenames:
            if path[-3:] == 'mp3':
                fullpath = os.path.join(dirpath, path)
                existingFilePaths.append(fullpath)

    if len(existingFilePaths) > 0:
        p.message("Loading metadata from existing files.", 2)
        if p.level > 1 and p.level < 4:
            bar = ProgressBar(len(existingFilePaths),"numbers")
            bar.draw()
        i = 0

        for path in existingFilePaths:
            try:
                song = getMetaData(path)
                destset.add(song)
            except HeaderNotFoundError:
                # This is when the mp3 file in place is malformed, like when it is
                # only a partial file
                os.remove(path)
            except IOError:
                # Something weird happened
                p.message("File not found: " + path, 2)
            i += 1
            if p.level > 1 and p.level < 4:
                bar.update(i)
    else:
        p.message("No existing files", 3)

    toAdd = srcset - destset
    toDel = destset - srcset

    # we can't just take the intersection, because we need the version from
    # dest
    toCheck = set()
    for song in destset:
        if song in srcset:
            toCheck.add(song)

    # Delete songs that shouldn't be there (if we should delete things)
    if delete and len(toDel) > 0 and not pretend:
        p.message("Deleting songs", 1)
        for song in toDel:
            os.remove(song.mp3path)
    else:
        p.message("Not deleting: delete flag={0}, pretend={1} len(toDel)={2}".format(delete,pretend,len(toDel)),5)

    # Move songs around that are already there, but possibly not in the right
    # place
    first = True
    if len(toCheck) > 0:
        for song in toCheck:
            data = song.data(root=dest)
            data['artist'] = sanitize(data['artist'])
            data['album'] = sanitize(data['album'])
            data['title'] = sanitize(data['title'])
            newFile = ""
            if not flat:
                artistDir = u"{0[root]}/{0[artist]}".format(data)
                albumDir = artistDir + u"/{0[album]}".format(data)
                newFile = albumDir + u"/{0[track]:0>2} {0[title]}.mp3".format(data)

                if not os.path.exists(artistDir) and not pretend:
                    os.mkdir(artistDir)
                if not os.path.exists(albumDir) and not pretend:
                    os.mkdir(albumDir)
            else:
                newFile = u"{0[root]}/{0[artist]} - {0[album]} - {0[track]:0>2} {0[title]}.mp3".format(data)

            if song.mp3path != newFile:
                if first:
                    first = False
                    p.message("Organizing old songs", 1)
                if not pretend:
                    shutil.move(song.mp3path, newFile)

    # Copy new songs
    if len(toAdd) > 0:
        p.message("Copying songs", 1)
        if p.level > 1 and p.level < 4:
            bar = ProgressBar(len(toAdd),"numbers")
            bar.draw()
        i = 0

        for song in toAdd:
            data = song.data(root=dest)
            data['artist'] = sanitize(data['artist'])
            data['album'] = sanitize(data['album'])
            data['title'] = sanitize(data['title'])
            newPath = ""
            if not flat:
                artistDir = u"{0[root]}/{0[artist]}".format(data)
                albumDir = artistDir + u"/{0[album]}".format(data)
                newPath = albumDir + u"/{0[track]:0>2} {0[title]}.mp3".format(data)

                if not os.path.exists(artistDir) and not pretend:
                    os.mkdir(artistDir)
                if not os.path.exists(albumDir) and not pretend:
                    os.mkdir(albumDir)
            else:
                newPath = u"{0[root]}/{0[artist]} - {0[album]} - {0[track]:0>2} {0[title]}.mp3".format(data)

            p.message("Copying {0}".format(newPath), 4)
            if not pretend:
                try:
                    shutil.copyfile(song.mp3path, newPath)
                except IOError as e:
                    p.message("Error copying {0}: {1}".format(newPath, e.strerror), 3)
            i += 1
            if p.level > 1 and p.level < 4:
                bar.update(i)
    else:
        p.message("All songs already there!", 1)

    p.message("\nDone.", 1)
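The core of this example is the three-way set comparison between the playlist and the destination. A stripped-down sketch of just that planning step (hypothetical helper; assumes elements hash and compare by song metadata, not by path):

def plan_sync(srcset, destset):
    to_add = srcset - destset    # in the playlist, missing from the device
    to_del = destset - srcset    # on the device, gone from the playlist
    # Keep the destination instances for the overlap: their mp3path points
    # at the device, which is what the re-organize step needs.
    to_check = set(song for song in destset if song in srcset)
    return to_add, to_del, to_check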
Beispiel #50
0
def parse_crux_search_txt(filename):
    """Iterate over records in a search.{target,decoy}.txt.

    Crux txt format files are tab-delimited with 30 fields*, described
    in the online documentation [1]. This function returns an iterator
    which yields a dictionary with the fields and their values.

    * 'decoy q-value (p-value)' is not output by Crux, at least as of v1.33.

    [1] http://noble.gs.washington.edu/proj/crux/txt-format.html

    Arguments:
       filename: Name of the crux search-for-matches output.

    Returns:
       Dictionary that maps field names to values. Only fields that
       are non-empty in the input exist in the returned dictionary.
       Many of the fields are not usually set in the output of crux
       search-for-matches, and will not be available.

    """
    fields = ['scan', # int
              'charge', # int
              'spectrum precursor m/z', # float
              'spectrum neutral mass', # float
              'peptide mass', # float
              'delta_cn', # float
              'sp score', # float
              'sp rank', # float
              'xcorr score', # float
              'xcorr rank', # int
              'p-value', # float
              'Weibull est. q-value', # float
              'decoy q-value (xcorr)', # float
              'percolator score', # float
              'percolator rank', # int
              'percolator q-value', # float
              'q-ranker score', # float
              'q-ranker q-value', # float
              'b/y ions matched', # int
              'b/y ions total', # int
              'matches/spectrum', # int
              'sequence', # string
              'cleavage type', # string
              'protein id', # string
              'flanking aa', # string
              'unshuffled sequence', # string
              'eta', # float
              'beta', # float
              'shift', # float
              'corr'] # float
    casts = [ int, int, float, float, float, float, float, float, float, int,
              float, float, float, float, int, float, float, float, int, int,
              int, str, str, str, str, str, float, float, float, float ]
    assert(len(fields) == len(casts))

    _mandatories = [ 'scan', 'charge', 'spectrum precursor m/z',
                     'spectrum neutral mass', 'xcorr score',
                     'xcorr rank', 'sequence' ]

    def conv(f, value):
        value = value.strip()
        if len(value):
            return f(value)

    def validate(record):
        return all(m in record for m in _mandatories)

    widgets = [ Percentage(), Bar(), ETA() ]
    progress = ProgressBar(widgets = widgets,
                           maxval = os.path.getsize(filename)).start()

    with open(filename) as f:
        reader = csv.reader(f, delimiter='\t')
        # Header
        row = reader.next()
        if row != fields:
            raise ParseError('Header: ', filename, 1, ' '.join(row))
        # Body
        for row in reader:
            progress.update(f.tell())
            if len(row) != len(fields):
                raise ParseError('Line: ', filename, reader.line_num,
                                 ' '.join(row))

            r = dict((k, conv(cast, x))
                     for k, cast, x in zip(fields, casts, row))
            # Drop empty fields so that, as the docstring promises, only
            # non-empty columns appear in the yielded record.
            r = dict((k, v) for k, v in r.items() if v is not None)
            if r:
                if not validate(r):
                    raise ParseError('Missing: ', filename, reader.line_num,
                                     ' '.join(row))
                yield r

    progress.finish()
    sys.stdout.write('\n')
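Since parse_crux_search_txt is a generator, records stream out one at a time while the bar tracks the file offset. A minimal usage sketch (file name hypothetical; only the mandatory fields are guaranteed present):

for record in parse_crux_search_txt('search.target.txt'):
    print record['scan'], record['charge'], record['xcorr score']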
Beispiel #51
0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from progress import ProgressBar
from animation import *
from time import sleep

p = ProgressBar(" Doing the 1st thing:", max_value=20, width=20)
for i in range(21):
    p.update(i)
    sleep(.25)
p.finish("Success")

a = Animation("Waiting for the 2nd thing:")
for i in range(21):
    a.next()
    sleep(.25)
a.finish("Success")

a = Animation("Waiting for the 3rd thing:", animation=ANIMATE_VGROW)
for i in range(21):
    a.next()
    sleep(.25)
a.finish("Success")

a = Animation("Waiting for the 4th thing:", animation=ANIMATE_HGROW)
for i in range(21):
    a.next()
    sleep(.25)
a.finish("Success")

a = Animation("Waiting for the 5th thing:", animation=ANIMATE_COUNT)
for i in range(21):
    a.next()
    sleep(.25)
a.finish("Success")
Beispiel #52
0
    # white dwarf temp
    twdVals = np.random.normal(loc=args.twd,scale=args.e_twd,size=chainLength)*units.K
    # period
    pVals = np.random.normal(loc=args.p,scale=args.e_p,size=chainLength)*units.d

    # loop over the MCMC chain, calculating system parameters as we go
    
    # table for results
    results = Table(names=('q','Mw','Rw','Mr','Rr','a','Kw','Kr','incl'))
    # need to be a little careful about astropy versions here, since only
    # versions >=1.0 allow quantities in tables
    # function below extracts value from quantity and floats alike
    getval = lambda el: getattr(el,'value',el) 
            
    psolve = partial(solve,baseDir=baseDir)
    data = zip(qVals,dphiVals,rwVals,twdVals,pVals)
    solvedParams = PB.map(psolve,data,multiprocess=True)
    
    print 'Writing out results...'
    # loop over these results and put all the solutions in our results table
    iStep = 0    
    bar = ProgressBar()
    for thisResult in solvedParams:
        bar.render(int(100*iStep/(len(solvedParams))),'Combining data')
        iStep += 1
        if thisResult is not None:
            results.add_row(thisResult)      

    print 'Found solutions for %d percent of samples in MCMC chain' % (100*float(len(results))/float(chainLength))
    results.write('physicalparams.log',format='ascii.commented_header')
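The example above propagates observational uncertainties by drawing one Gaussian sample per chain step and solving each draw. A minimal sketch of that Monte Carlo pattern, without the astropy units and custom solver of the original (all names hypothetical):

import numpy as np

def propagate(solve_one, means, sigmas, n_samples=10000):
    # One Gaussian draw per input quantity, n_samples deep.
    draws = [np.random.normal(loc=m, scale=s, size=n_samples)
             for m, s in zip(means, sigmas)]
    # Solve each sampled input tuple; keep only the solvable ones,
    # mirroring the None check in the loop above.
    results = (solve_one(sample) for sample in zip(*draws))
    return [r for r in results if r is not None]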
Beispiel #53
0
from noisify import *
from progress import ProgressBar
import pandas as pd

REPEATS = 100


heads, mods, whole, assoc = load_data()
concatted = pd.concat([heads, mods], ignore_index=True)

agg_concat_orig = combine_measures(aggregate_ratings(concatted))['mean']
agg_whole_orig = aggregate_ratings(whole)['mean']

output = []

NUM_DROP = range(1, 26)
pb = ProgressBar(len(NUM_DROP) * REPEATS)
pb.errput()
for n in NUM_DROP:
    this_row = {}
    for i in xrange(REPEATS):
        noisy_concat = replace_subjects(concatted, n)
        noisy_whole = replace_subjects(whole, n)
        clean_concat = remove_most_deviant_subjects(noisy_concat, n)
        clean_whole = remove_most_deviant_subjects(noisy_whole, n)

        agg_concat = combine_measures(aggregate_ratings(noisy_concat))['mean']
        agg_whole = aggregate_ratings(noisy_whole)['mean']

        agg_cl_concat = combine_measures(aggregate_ratings(clean_concat))['mean']
        agg_cl_whole = aggregate_ratings(clean_whole)['mean']
Beispiel #54
0
    zfile.extractall('tmp')

    extract_files('tmp')

    #cleaning up
    shutil.rmtree('tmp')


try:
    url = obj["url"][1:][:-1]  # strip the surrounding quotes
    name = url.split('/')[-1]
    filename = os.path.join(DOWNLOADS_FOLDER, name)
    name, ext = os.path.splitext(filename)

    bar = ProgressBar(title="Downloading Started : Downloading %s V. %s " % (name, obj["version"]))

    if ext not in (".app", ".zip", ".pkg", ".dmg"):
        bar.update(0, message="Can't handle files of type %s" % ext)
        time.sleep(2)
        raise ValueError("unsupported file type: %s" % ext)

    os.chdir(DOWNLOADS_FOLDER)

    def prg(count, blockSize, totalSize):
        # urllib-style reporthook: clamp to 100% on the final block.
        percent = min(int(count * blockSize * 100 / totalSize), 100)
        bar.update(percent, message="(%s%%) Downloading %s " % (percent, url))
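The prg callback has the (count, blockSize, totalSize) signature of a urllib reporthook, so the truncated code presumably hands it to urlretrieve. A standalone Python 2 sketch of that pattern (URL and file name hypothetical):

import urllib

def reporthook(count, block_size, total_size):
    percent = min(int(count * block_size * 100 / total_size), 100)
    print "\rdownloaded %3d%%" % percent,

urllib.urlretrieve("http://example.com/file.zip", "file.zip",
                   reporthook=reporthook)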