Code example #1
File: util.py Project: fhirschmann/algolab
def angle_between(v1, v2):
    """
    Calculates the angle between vector `v1` and vector `v2`.

    >>> round(angle_between((0, 5), (1, 1)))
    45.0

    :param v1: a vector
    :type v1: sequence of two integers/floats
    :param v2: another vector
    :type v2: sequence of two integers/floats
    :returns: the angle in degrees
    :rtype: float
    """
    v = np.array(v1)
    w = np.array(v2)

    norm_v = norm(v)
    norm_w = norm(w)

    cos_angle = np.around(np.dot(v, w) / norm_v / norm_w, PRECISION)

    if not -1 <= cos_angle <= 1:
        return None
    else:
        return np.around(np.arccos(cos_angle) * 360 / 2 / np.pi, PRECISION)
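A minimal usage sketch for the function above, assuming it is pasted into a script together with its dependencies (numpy as np, norm from numpy.linalg, and a module-level PRECISION constant; the value 8 here is only an assumption):

import numpy as np
from numpy.linalg import norm

PRECISION = 8  # assumed rounding precision; the real module defines its own value

print(angle_between((0, 5), (1, 1)))   # 45.0
print(angle_between((1, 0), (0, 1)))   # 90.0
print(angle_between((1, 0), (-1, 0)))  # 180.0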
Code example #2
File: stats.py Project: zaxtax/pymc3
def r2_score(y_true, y_pred, round_to=2):
    R"""R-squared for Bayesian regression models. Only valid for linear models.
    http://www.stat.columbia.edu/%7Egelman/research/unpublished/bayes_R2.pdf

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    round_to : int
        Number of decimals used to round results (default 2).

    Returns
    -------
    `namedtuple` with the following elements:
    R2_median: median of the Bayesian R2
    R2_mean: mean of the Bayesian R2
    R2_std: standard deviation of the Bayesian R2
    """
    dimension = None
    if y_true.ndim > 1:
        dimension = 1

    var_y_est = np.var(y_pred, axis=dimension)
    var_e = np.var(y_true - y_pred, axis=dimension)

    r2 = var_y_est / (var_y_est + var_e)
    r2_median = np.around(np.median(r2), round_to)
    r2_mean = np.around(np.mean(r2), round_to)
    r2_std = np.around(np.std(r2), round_to)
    r2_r = namedtuple('r2_r', 'r2_median, r2_mean, r2_std')
    return r2_r(r2_median, r2_mean, r2_std)
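A hedged, self-contained check of the R² computation above, using synthetic arrays in place of real posterior-predictive samples (the names and shapes below are assumptions for the sketch, not PyMC3 API):

import numpy as np
from collections import namedtuple

rng = np.random.RandomState(0)
signal = rng.normal(size=100)
y_pred = signal + rng.normal(scale=0.5, size=(50, 100))  # 50 posterior-predictive draws
y_true = np.tile(signal, (50, 1))                         # ground truth repeated per draw

result = r2_score(y_true, y_pred, round_to=3)
print(result.r2_median, result.r2_mean, result.r2_std)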
Code example #3
File: projaxes.py Project: montefra/healpy
 def _get_interv_graticule(self,pmin,pmax,dpar,mmin,mmax,dmer,verbose=True):
     def set_prec(d,n,nn=2):
         arcmin=False
         if d/n < 1.:
             d *= 60
             arcmin = True
             nn = 1
         x = d/n
         y = nn*x
         ex = np.floor(np.log10(y))
         z = np.around(y/10**ex)*10**ex/nn
         if arcmin:
             z = 1./np.around(60./z)
         return z
     max_n_par = 18
     max_n_mer = 36
     n_par = (pmax-pmin)/dpar
     n_mer = (mmax-mmin)/dmer
     if n_par > max_n_par:
         dpar = set_prec((pmax-pmin)/dtor,max_n_par/2)*dtor
     if n_mer > max_n_mer:
         dmer = set_prec((mmax-mmin)/dtor,max_n_mer/2,nn=1)*dtor
     if dmer/dpar < 0.2 or dmer/dpar > 5.:
         dmer = dpar = max(dmer,dpar)
     vdeg = np.floor(np.around(dpar/dtor,10))
     varcmin = (dpar/dtor-vdeg)*60.
     if verbose: print "The interval between parallels is %d deg %.2f'."%(vdeg,varcmin)
     vdeg = np.floor(np.around(dmer/dtor,10))
     varcmin = (dmer/dtor-vdeg)*60.
     if verbose: print "The interval between meridians is %d deg %.2f'."%(vdeg,varcmin)
     return dpar,dmer
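The nested set_prec helper above snaps a graticule step to a "nice" value by rounding its leading digit; a standalone sketch of the same idea in plain degrees (no dtor conversion, and the function name here is illustrative only):

import numpy as np

def nice_step(span_deg, max_lines, nn=2):
    # round span/max_lines so that nn*step keeps a single significant digit
    step = span_deg / max_lines
    y = nn * step
    ex = np.floor(np.log10(y))
    return np.around(y / 10**ex) * 10**ex / nn

print(nice_step(47.3, 9))   # 5.0
print(nice_step(123.0, 9))  # 15.0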
Code example #4
File: A.py Project: od0/HW2
def plot_density(count_trips,count,title):
    grid = np.zeros((config.bins,config.bins))
    for (i,j),z in np.ndenumerate(grid):
        try:
            grid[j,i] = float(count[(i,j)]) / float(count_trips[(i,j)])
        except:
            grid[j,i] = 0
        #print "----"
        #print grid[i,j], i, j
        #print count[(i,j)]
        #print count_trips[(i,j)]
    grid = np.flipud(grid) #to counter matshow vertical flip
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.matshow(grid, cmap='spectral')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    xticks = np.linspace(config.minlong,config.maxlong,num=round(config.bins/2))
    yticks = np.linspace(config.minlat,config.maxlat,num=round(config.bins/2))
    yticks = yticks[::-1]
    xticks = np.around(xticks,decimals=1)
    yticks = np.around(yticks,decimals=1)
    xspace = np.linspace(0,config.bins-1,config.bins/2)
    yspace = np.linspace(0,config.bins-1,config.bins/2)
    plt.xticks(xspace,xticks)
    plt.yticks(yspace,yticks)
    for (i,j),z in np.ndenumerate(grid):
        ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
    plt.title(title)
    plt.show()
Code example #5
File: regions_detect_knn.py Project: fierval/retina
    def _get_initial_classes(self):
        images = map(lambda f: cv2.imread(path.join(self._root, f)), self._files)
        self._avg_pixels = np.array([], dtype=np.uint8)

        # extract parts from each image for all of our 6 categories
        for i in range(0, self._n_objects):
            rects = self._rects[:, i]
            
            # compute maximum rectangle
            rows = np.max(rects['f2'] - rects['f0'])
            cols = np.max(rects['f3'] - rects['f1'])

            # extract annotated rectangles
            im_rects = map(lambda (im, r): im[r[0]:r[2],r[1]:r[3],:], zip(images, rects))

            # resize all rectangles to the max size & average all the rectangles
            im_rects = np.array(map(lambda im: cv2.resize(im, (cols, rows)), im_rects), dtype=np.float)
            avgs = np.around(np.average(im_rects, axis = 0))

            # average the resulting rectangle to compute 
            mn = np.around(np.array(cv2.mean(avgs), dtype='float'))[:-1].astype('uint8')

            if(self._avg_pixels.size == 0):
                self._avg_pixels = mn
            else:
                self._avg_pixels = np.vstack((self._avg_pixels, mn))
Code example #6
File: astro.py Project: sfarrens/python_lib
def radec2xyz(ra, dec, r):

    x = np.around(r * np.cos(deg2rad(ra)) * np.cos(deg2rad(dec)), 8)
    y = np.around(r * np.sin(deg2rad(ra)) * np.cos(deg2rad(dec)), 8)
    z = np.around(r * np.sin(deg2rad(dec)), 8)

    return x, y, z
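A quick sanity check of the conversion above, assuming deg2rad is numpy.deg2rad and radec2xyz is the function shown:

import numpy as np
from numpy import deg2rad

# unit sphere: RA = 0, Dec = 0 lies on the x-axis; RA = 90 on the y-axis; Dec = 90 on the z-axis
x, y, z = radec2xyz(0.0, 0.0, 1.0)
print(x, y, z)    # 1.0 0.0 0.0
x, y, z = radec2xyz(90.0, 0.0, 1.0)
print(x, y, z)    # 0.0 1.0 0.0
x, y, z = radec2xyz(0.0, 90.0, 1.0)
print(x, y, z)    # 0.0 0.0 1.0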
Code example #7
File: test_MarkovModel.py Project: BioGeek/biopython
    def test_logvecadd(self):
        vec1 = log(array([1, 2, 3, 4]))
        vec2 = log(array([5, 6, 7, 8]))

        sumvec = array([1.79175947, 2.07944154, 2.30258509, 2.48490665])
        self.assertTrue(
            array_equal(around(MarkovModel._logvecadd(vec1, vec2), decimals=3), around(sumvec, decimals=3)))
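The helper under test adds two vectors stored in log space; numpy's logaddexp expresses the same identity log(exp(a) + exp(b)) elementwise, so a hedged cross-check could look like this (logaddexp stands in for MarkovModel._logvecadd, which is not reproduced here):

import numpy as np

vec1 = np.log(np.array([1, 2, 3, 4]))
vec2 = np.log(np.array([5, 6, 7, 8]))

# elementwise log(exp(vec1) + exp(vec2)) == log([6, 8, 10, 12])
expected = np.log(np.array([6, 8, 10, 12]))
assert np.allclose(np.logaddexp(vec1, vec2), expected)
print(np.around(expected, decimals=3))  # [1.792 2.079 2.303 2.485]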
Code example #8
File: test_audiotools.py Project: boylea/sparkle
def test_make_tone_regular_at_caldb():
    fq = 15000
    db = 100
    fs = 100000
    dur = 1
    risefall = 0.002
    calv = 0.1
    caldb = 100
    npts = fs*dur

    tone, timevals = tools.make_tone(fq, db, dur, risefall, fs, caldb, calv)

    assert len(tone) == npts
    assert len(timevals) == npts

    spectrum = np.fft.rfft(tone)
    peak_idx = (abs(spectrum - max(spectrum))).argmin()
    freq_idx = np.around(fq*(float(npts)/fs))
    assert peak_idx == freq_idx

    if tools.USE_RMS is True:
        print 'tone max', np.around(np.amax(tone), 5), calv*np.sqrt(2)
        assert np.around(np.amax(tone), 5) == np.around(calv*np.sqrt(2),5)
    else:
        assert np.around(np.amax(tone), 5) == calv

    assert timevals[-1] == dur - (1./fs)
Code example #9
  def test_metrics_correctness_with_iterator(self):
    layers = [
        keras.layers.Dense(8, activation='relu', input_dim=4,
                           kernel_initializer='ones'),
        keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
    ]

    model = testing_utils.get_model_from_layers(layers, (4,))

    model.compile(
        loss='binary_crossentropy',
        metrics=['accuracy', metrics_module.BinaryAccuracy()],
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())

    np.random.seed(123)
    x = np.random.randint(10, size=(100, 4)).astype(np.float32)
    y = np.random.randint(2, size=(100, 1)).astype(np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(np.around(outs[1], decimals=1), 0.5)
    self.assertEqual(np.around(outs[2], decimals=1), 0.5)

    y = np.zeros((100, 1), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(outs[1], 0.)
    self.assertEqual(outs[2], 0.)
Code example #10
File: plot.py Project: astrophysicsvivien/lalsuite
    def add_worst(self, *p_values):
        """
        Mark the point at which the deviation is largest.

        Parameters
        ----------

        p_values: list or `np.ndarray`
            Same as in `add_series`.
        """
        series = list(self._make_series(p_values))
        for xs, ys in zip(series[0::2], series[1::2]):
            i = np.argmax(np.abs(ys - xs))
            x = xs[i]
            y = ys[i]
            if y == x:
                continue
            self.plot([x, x, 0], [0, y, y], '--', color='black', linewidth=0.5)
            if y < x:
                self.plot([x, y], [y, y], '-', color='black', linewidth=1)
                self.text(
                    x, y, ' {0:.02f} '.format(np.around(x - y, 2)),
                    ha='left', va='top')
            else:
                self.plot([x, x], [x, y], '-', color='black', linewidth=1)
                self.text(
                    x, y, ' {0:.02f} '.format(np.around(y - x, 2)),
                    ha='right', va='bottom')
Code example #11
File: test_audiotools.py Project: boylea/sparkle
def test_make_tone_irregular():
    fq = 15066
    db = 82
    fs = 200101
    dur = 0.7
    risefall = 0.0015
    calv = 0.888
    caldb = 99
    npts = int(fs*dur)

    tone, timevals = tools.make_tone(fq, db, dur, risefall, fs, caldb, calv)

    print 'lens', npts, len(tone), len(timevals)
    assert len(tone) == npts
    assert len(timevals) == npts

    spectrum = np.fft.rfft(tone)
    peak_idx = (abs(spectrum - max(spectrum))).argmin()
    freq_idx = np.around(fq*(float(npts)/fs))
    assert peak_idx == freq_idx

    print 'intensities', (20 * np.log10(tools.signal_amplitude(tone, fs)/calv)) + caldb, db
    assert np.around((20 * np.log10(tools.signal_amplitude(tone, fs)/calv)) + caldb, 1) == db

    print 'durs', np.around(timevals[-1], 5), dur - (1./fs)
    assert dur - 2*(1./fs) < timevals[-1] <= dur - (1./fs)
Code example #12
File: pYAAPT.py Project: Parakrant/AMFM_decompy
def nlfer(signal, pitch, parameters):

    #---------------------------------------------------------------
    # Set parameters.
    #---------------------------------------------------------------
    N_f0_min = np.around((parameters['f0_min']*2/float(signal.new_fs))*pitch.nfft)
    N_f0_max = np.around((parameters['f0_max']/float(signal.new_fs))*pitch.nfft)

    window = hanning(pitch.frame_size+2)[1:-1]
    data = np.zeros((signal.size))  #Needs other array, otherwise stride and
    data[:] = signal.filtered     #windowing will modify signal.filtered

    #---------------------------------------------------------------
    # Main routine.
    #---------------------------------------------------------------
    samples = np.arange(int(np.fix(float(pitch.frame_size)/2)),
                        signal.size-int(np.fix(float(pitch.frame_size)/2)),
                        pitch.frame_jump)

    data_matrix = np.empty((len(samples), pitch.frame_size))
    data_matrix[:, :] = stride_matrix(data, len(samples),
                                    pitch.frame_size, pitch.frame_jump)
    data_matrix *= window

    specData = np.fft.rfft(data_matrix, pitch.nfft)

    frame_energy = np.abs(specData[:, N_f0_min-1:N_f0_max]).sum(axis=1)
    pitch.set_energy(frame_energy, parameters['nlfer_thresh1'])
    pitch.set_frames_pos(samples)
Code example #13
def test_circmean_against_scipy():
    # testing against scipy.stats.circmean function
    # the data is the same as the test before, but in radians
    data = np.array([0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207,
                     6.24827872])
    answer = scipy.stats.circmean(data)
    assert_equal(np.around(answer, 2), np.around(circmean(data), 2))
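The circmean being tested can be approximated with the standard circular-mean formula (angle of the mean unit vector); a minimal sketch of that formula, assuming angles in radians and ignoring the wrapping convention scipy applies to its result:

import numpy as np

def circ_mean(angles):
    # angle of the mean unit vector on the circle
    return np.angle(np.mean(np.exp(1j * np.asarray(angles))))

data = np.array([0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207,
                 6.24827872])
print(np.around(circ_mean(data), 2))  # roughly 0.85 for this data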
Code example #14
    def solve_LP_problem(self):
        (f_coef_matrix, f_column_vector) = self.build_function_coef_matrix_and_column_vector()
        (d_coef_matrix, d_column_vector) = self.build_derivative_coef_matrix_and_column_vector()

        # Solve the LP problem by combining constraints for both function and derivative info.
        objective_function_vector = matrix(list(itertools.repeat(1.0, self.no_vars)))
        coef_matrix = sparse([f_coef_matrix, d_coef_matrix])
        column_vector = matrix([f_column_vector, d_column_vector])

        min_sol = solvers.lp(objective_function_vector, coef_matrix, column_vector)
        is_consistent = min_sol['x'] is not None

        # Print the LP problem for debugging purposes.
        if self.verbose:
            self.display_LP_problem(coef_matrix, column_vector)

        if is_consistent:
            self.min_heights = np.array(min_sol['x']).reshape(self.no_points_per_axis)
            print np.around(self.min_heights, decimals=2)

            # Since consistency has been established, solve the converse LP problem to get the
            # maximal bounding surface.
            max_sol = solvers.lp(-objective_function_vector, coef_matrix, column_vector)
            self.max_heights = np.array(max_sol['x']).reshape(self.no_points_per_axis)
            print np.around(self.max_heights, decimals=2)

            if self.plot_surfaces:
                self.plot_3D_objects_for_2D_case()

        else:
            print 'No witness for consistency found.'

        return is_consistent
Code example #15
File: go_mat.py Project: TuringTW/SoftwareRadar
	def save2mat(self, i):
		global samples0, samples1;
		std0 = np.around(np.std(samples0), 5);
		std1 = np.around(np.std(samples1), 5);
		print 'std0:', std0, 'std1', std1;

		scipy.io.savemat('./data/'+folder_name+'/'+filename+'_'+str(i)+'.mat', mdict={'s0':samples0, 's1':samples1, 'timestamp': self.timestamp, 'fs':self.sdr.sample_rate, 'ref_addr':ref_addr});
Code example #16
def tmatrix_exp_calc(model, fitopts, bin_size, ran_size, spacing):
    ##read trace file from 
    cwd = os.getcwd()
    subdir = model.name
    iteration = fitopts["iteration"]
    
    sub = "%s/%s/iteration_%d" % (cwd,subdir,iteration)
    subdirec = "%s/fitting_%d" % (sub,iteration)
    TMfile = "%s/T_matrix_flat.dat" % subdirec
    
    ## MAY NEED TO MODIFY OPTIONS FOR T-MATRIX
    if not "TMdata" in fitopts:
        Tmatrixfile = "%s/T_matrix_exp.dat" % cwd
    else:
        Tmatrixfile = fitopts["TMdata"]
    
    Tmatrix = np.loadtxt(Tmatrixfile)

    #Extract entries matching range specified by ran_size
    lower_bin = int(np.around(ran_size[0]/spacing, 0))
    upper_bin = int(np.around(ran_size[1]/spacing, 0))
    T_matrix_small = Tmatrix[lower_bin:upper_bin, lower_bin:upper_bin]

    # Flatten and save
    T_matrix_flat = np.ndarray.flatten(T_matrix_small)
    np.savetxt(TMfile, T_matrix_flat)
    
    print "Extracted transition matrix"
Code example #17
    def print_errors(self):
        """
            Print all errors metrics.

            Note:
                For better printing format, install :mod:`prettytable`.

        """

        self.calc_metrics()

        try:
            from prettytable import PrettyTable

            table = PrettyTable(["Error", "Value"])
            table.align["Error"] = "l"
            table.align["Value"] = "l"

            for error in sorted(self.dict_errors.keys()):
                table.add_row([error, np.around(self.dict_errors[error], decimals=8)])

            print()
            print(table.get_string(sortby="Error"))
            print()

        except ImportError:
            print("For better table format install 'prettytable' module.")

            print()
            for error in sorted(self.dict_errors.keys()):
                print(error, np.around(self.dict_errors[error], decimals=8))
            print()
Code example #18
def compute_3d_transforms(dataset, args):
    """Compute all 3d transforms (translation/rotation) and update"""

    if not is_3d_tranformation(args):
        return

    #Generate new UIDs automatically when any transform changes the geometry
    generate_new_uids(dataset, args.suid, args.foruid, \
        generate_soiud_from_seriesuid(defaulf_series_uid, dataset.InstanceNumber))

    pos = [dataset.ImagePositionPatient[0].real, \
           dataset.ImagePositionPatient[1].real, dataset.ImagePositionPatient[2].real, 1.]
    row = [dataset.ImageOrientationPatient[0].real, \
           dataset.ImageOrientationPatient[1].real, dataset.ImageOrientationPatient[2].real, 1.]
    col = [dataset.ImageOrientationPatient[3].real, \
           dataset.ImageOrientationPatient[4].real, dataset.ImageOrientationPatient[5].real, 1.]
    xform = np.identity(4)

    matrix_set_rotation(xform, args.ax, args.ay, args.az)

    precision = 5
    new_row = np.around(xform.dot(row), precision)
    new_col = np.around(xform.dot(col), precision)
    new_orient = np.concatenate([new_row[:3], new_col[:3]])

    matrix_set_translation(xform, args.x, args.y, args.z)
    new_pos = np.around([pos[0] + args.x, pos[1] + args.y, pos[2] + args.z], precision)

    #update dataset with new values
    set_str_vec(dataset.ImagePositionPatient, new_pos[:3], 3)
    set_str_vec(dataset.ImageOrientationPatient, new_orient[:6], 6)
Code example #19
File: vtktools.py Project: TerraFERMA/TerraFERMA
def VtuMatchLocationsArbitrary(vtu1, vtu2, tolerance = 1.0e-6):
  """
  Check that the locations in the supplied vtus match, returning True if they
  match and False otherwise.
  The locations may be in a different order.
  """
   
  locations1 = vtu1.GetLocations()
  locations2 = vtu2.GetLocations()
  if not locations1.shape == locations2.shape:
    return False   
    
  for j in range(locations1.shape[1]):
    # compute the smallest possible precision given the range of this coordinate
    epsilon = numpy.finfo(numpy.float).eps * numpy.abs(locations1[:,j]).max()
    if tolerance<epsilon:
      # the specified tolerance is smaller than possible machine precision
      # (or something else went wrong)
      raise Exception("ERROR: specified tolerance is smaller than machine precision of given locations")
    # ensure epsilon doesn't get too small (might be for zero for instance)
    epsilon=max(epsilon,tolerance/100.0)

    # round to that many decimal places (-2 to be sure) so that
    # we don't get rounding issues with lexsort
    locations1[:,j]=numpy.around(locations1[:,j], int(-numpy.log10(epsilon))-2)
    locations2[:,j]=numpy.around(locations2[:,j], int(-numpy.log10(epsilon))-2)

  # lexical sort on x,y and z coordinates resp. of locations1 and locations2
  sort_index1=numpy.lexsort(locations1.T)
  sort_index2=numpy.lexsort(locations2.T)
  
  # should now be in same order, so we can check for its biggest difference
  return numpy.allclose(locations1[sort_index1],locations2[sort_index2], atol=tolerance)
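A toy illustration of the round-then-lexsort comparison used above, with two small synthetic point sets that differ only in row order and sub-tolerance noise (the numbers are made up for the sketch):

import numpy

locations1 = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
locations2 = numpy.array([[0.0, 1.0], [0.0, 0.0], [1.0, 0.0]]) + 1.0e-9  # reordered + tiny noise

tolerance = 1.0e-6
a = numpy.around(locations1, 8)
b = numpy.around(locations2, 8)
same = numpy.allclose(a[numpy.lexsort(a.T)], b[numpy.lexsort(b.T)], atol=tolerance)
print(same)  # True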
Code example #20
File: showit.py Project: freeman-lab/showit
def image(img, cmap='gray', bar=False, nans=True, clim=None, size=7, ax=None):
    """
    Streamlined display of images using matplotlib.

    Parameters
    ----------
    img : ndarray, 2D or 3D
        The image to display

    cmap : str or Colormap, optional, default = 'gray'
        A colormap to use, for non RGB images

    bar : boolean, optional, default = False
        Whether to append a colorbar

    nans : boolean, optional, default = True
        Whether to replace NaNs, if True, will replace with 0s

    clim : tuple, optional, default = None
        Limits for scaling image

    size : scalar, optional, default = 7
        Size of the figure

    ax : matplotlib axis, optional, default = None
        An existing axis to plot into
    """
    from matplotlib.pyplot import axis, colorbar, figure, gca

    img = asarray(img)

    if (nans is True) and (img.dtype != bool):
        img = nan_to_num(img)

    if ax is None:
        f = figure(figsize=(size, size))
        ax = gca()

    if img.ndim == 3:
        if bar:
            raise ValueError("Cannot show meaningful colorbar for RGB images")
        if img.shape[2] != 3:
            raise ValueError("Size of third dimension must be 3 for RGB images, got %g" % img.shape[2])
        mn = img.min()
        mx = img.max()
        if mn < 0.0 or mx > 1.0:
            raise ValueError("Values must be between 0.0 and 1.0 for RGB images, got range (%g, %g)" % (mn, mx))
        im = ax.imshow(img, interpolation='nearest', clim=clim)
    else:
        im = ax.imshow(img, cmap=cmap, interpolation='nearest', clim=clim)

    if bar is True:
        cb = colorbar(im, fraction=0.046, pad=0.04)
        rng = abs(cb.vmax - cb.vmin) * 0.05
        cb.set_ticks([around(cb.vmin + rng, 1), around(cb.vmax - rng, 1)])
        cb.outline.set_visible(False)

    axis('off')

    return im
Code example #21
 def __init__(self,sortdat):
     self.counts = self.rate*self.itime
     self.sig_counts = np.sqrt(self.counts)  
     self.foursig = self.four_sigma[np.around(self.rate)]
     self.hexval = self.hex_adjust[np.around(self.hex_set)]
     self.base_alarm = self.rate + self.hexval + self.foursig
     self.source_time = sortdat.time
Code example #22
File: xor_kegg_pathways.py Project: codealphago/GSLR
def generate_dataset(first_pathway_id, first_pathway_genes, second_pathway_id, second_pathway_genes, proteomics, POSITIVE_SAMPLES=100, NEGATIVE_SAMPLES=100):

	means = proteomics.mean(axis=0)
	variances = proteomics.var(axis=0)

	negatives = sample_cov(50, proteomics)
	negatives = np.around(negatives + means.values, 6)
	negatives = pd.DataFrame(negatives, columns=proteomics.columns, index=['negative']*50)

	first_new_pathway_means = pd.Series(np.random.normal(0,variances), index=variances.index)[first_pathway_genes].fillna(0)
	second_new_pathway_means = pd.Series(np.random.normal(0,variances), index=variances.index)[second_pathway_genes].fillna(0)

	first_new_means = pd.concat([means, first_new_pathway_means], axis=1).fillna(0).sum(axis=1).reindex(means.index)
	second_new_means = pd.concat([means, second_new_pathway_means], axis=1).fillna(0).sum(axis=1).reindex(means.index)
	both_new_means = pd.concat([means, first_new_pathway_means, second_new_pathway_means], axis=1).fillna(0).sum(axis=1).reindex(means.index)

	first = sample_cov(50, proteomics)
	first = np.around(first + first_new_means.values, 6)
	first = pd.DataFrame(first, columns=proteomics.columns, index=[first_pathway_id]*50)

	second = sample_cov(50, proteomics)
	second = np.around(second + second_new_means.values, 6)
	second = pd.DataFrame(second, columns=proteomics.columns, index=[second_pathway_id]*50)

	both = sample_cov(50, proteomics)
	both = np.around(both + both_new_means.values, 6)
	both = pd.DataFrame(both, columns=proteomics.columns, index=['negative']*50)

	dataset = pd.concat([negatives,first,second,both]).sample(frac=1)  # shuffle

	filename = './xor_ludwig_svd_normals/'+first_pathway_id+'_'+second_pathway_id+'_inbiomap_exp.csv'
	return dataset.to_csv(filename, index=True, header=True)
Code example #23
	def receivers_setup(self, pr, pz, Tiempo):

		self.receiver_r         = np.int32(np.around(np.array(pr)/self.dr))
		self.receiver_z         = np.int32(np.around(np.array(pz)/self.dz))

		self.N_z                = np.size(self.receiver_z,0)
		self.receivers_signals  = np.zeros((Tiempo,self.N_z), dtype=np.float32)
Code example #24
File: usoutline.py Project: chebee7i/twitter
def us_grid(resolution=.5, sparse=True):
    resolution = .5
    bounds = USA.bounds
    # Grid boundaries are determined by nearest degree.
    min_long = np.floor(bounds[0])
    min_lat  = np.floor(bounds[1])
    max_long = np.ceil(bounds[2])
    max_lat  = np.ceil(bounds[3])
    # Division should be close to an integer.
    # Add one to number of points to include the end
    # This is robust only to resolutions that "evenly" divide the range.
    nPointsLong = np.around((max_long - min_long) / resolution) + 1
    nPointsLat  = np.around((max_lat  - min_lat ) / resolution) + 1
    long_points = np.linspace(min_long, max_long, nPointsLong)
    lat_points  = np.linspace(min_lat,  max_lat,  nPointsLat )

    outline = contiguous_outline2('../tiger/cb_2013_us_nation_20m.shp')

    for i, (xi, yi) in enumerate(product(range(len(long_points)-1),
                                         range(len(lat_points)-1))):
        cell = box(long_points[xi], lat_points[yi],
                   long_points[xi+1], lat_points[yi+1])
        if sparse:
            # Add cell only if it intersects contiguous USA
            if cell.intersects(outline):
                yield cell
        else:
            yield cell
Code example #25
File: test_poibin.py Project: tsakim/poibin
def test_pmf_accuracy():
    """Compare accuracy of the probability mass function.

    Compare the results with the accuracy check proposed in [Hong2013]_,
    equation (15).
    """
    [p1, p2, p3] = np.around(np.random.random_sample(size=3), decimals=2)
    [n1, n2, n3] = np.random.random_integers(1, 10, size=3)
    nn = n1 + n2 + n3
    l1 = [p1 for i in range(n1)]
    l2 = [p2 for i in range(n2)]
    l3 = [p3 for i in range(n3)]
    p = l1 + l2 + l3
    b1 = binom(n=n1, p=p1)
    b2 = binom(n=n2, p=p2)
    b3 = binom(n=n3, p=p3)
    k = np.random.randint(0, nn + 1)
    chi_bn = 0
    for j in range(0, k+1):
        for i in range(0, j+1):
            chi_bn += b1.pmf(i) * b2.pmf(j - i) * b3.pmf(k - j)
    pb = PoiBin(p)
    chi_pb = pb.pmf(k)
    assert np.all(np.around(chi_bn, decimals=10) == np.around(chi_pb,
                                                              decimals=10))
Code example #26
File: Plots.py Project: TimHarvey2/CoolProp
 def _plotRound(self, values):
     """
     A function round an array-like object while maintaining the
     amount of entries. This is needed for the isolines since we
     want the labels to look pretty (=rounding), but we do not
     know the spacing of the lines. A fixed number of digits after
     rounding might lead to reduced array size.
     """
     inVal   = numpy.unique(numpy.sort(numpy.array(values)))
     output  = inVal[1:] * 0.0
     digits  = -1
     limit   = 10
     lim     = inVal * 0.0 + 10
     # remove less from the numbers until same length,
     # more than 10 significant digits does not really
     # make sense, does it?
     while len(inVal) > len(output) and digits < limit:
         digits += 1
         val     = ( numpy.around(numpy.log10(numpy.abs(inVal))) * -1) + digits + 1
         val     = numpy.where(val < lim, val,  lim)
         val     = numpy.where(val >-lim, val, -lim)
         output  = numpy.zeros(inVal.shape)
         for i in range(len(inVal)):
             output[i] = numpy.around(inVal[i],decimals=int(val[i]))
         output = numpy.unique(output)
     return output
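The helper above keeps increasing the rounding precision until no two isoline labels collide; a simplified standalone variant of that idea (a single global digit count rather than the per-value scaling used in the method) might read:

import numpy

def round_keep_unique(values, limit=10):
    # round with as few decimals as possible while keeping every value distinct
    vals = numpy.unique(numpy.sort(numpy.array(values, dtype=float)))
    for digits in range(limit + 1):
        rounded = numpy.unique(numpy.around(vals, decimals=digits))
        if len(rounded) == len(vals):
            return rounded
    return vals

print(round_keep_unique([0.1234, 0.1299, 0.52, 1.7]))  # -> 0.12, 0.13, 0.52, 1.7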
Code example #27
File: score.py Project: 675801717/orange3
def _symmetrical_uncertainty(X, Y):
    """Symmetrical uncertainty, Press et al., 1988."""
    from Orange.preprocess._relieff import contingency_table
    X, Y = np.around(X), np.around(Y)
    cont = contingency_table(X, Y)
    ig = InfoGain().from_contingency(cont, 1)
    return 2 * ig / (_entropy(cont.sum(0)) + _entropy(cont.sum(1)))
Code example #28
File: sdm_test.py Project: yymath/menpo
def test_obtain_shape_from_bb():
    s = sdm2.obtain_shape_from_bb(np.array([[26, 49], [350, 400]]))
    assert ((np.around(s.points) == np.around(initial_shape[3].points)).
            all())
    assert (s.n_dims == 2)
    assert (s.n_landmark_groups == 0)
    assert (s.n_points == 68)
Code example #29
File: Novelty.py Project: Kazjon/SurpriseEval
	def __init__(self, parser, k, startIndex=-1, parallel = True, batch=True):
		if startIndex==-1:
			startIndex = k
		self.Data = parser
		self.names = self.Data.getNames()
		self.k = k
		self.clusters = KMeans(k, n_jobs=1 - 2*(not parallel),n_init=10)
		self.props = self.Data.getProperties()
		self.artefacts = np.atleast_2d(self.Data.getList(self.props[0]))
		for attr in self.Data.getProperties()[1:]:
			self.artefacts = np.append(self.artefacts,np.atleast_2d(self.Data.getList(attr)),axis=0)
		self.artefacts = self.artefacts.T
		self.times = self.Data.getList()
		zipped = zip(self.times,self.artefacts,self.names)
		zipped = sorted(zipped,key=lambda x: x[0])
		unzipped = zip(*zipped)
		self.times = list(unzipped[0])
		self.artefacts = np.array(unzipped[1])
		self.names = list(unzipped[2])
		if batch:
			self.trainAll()
			self.currentIndex = len(self.names)-1
		else:
			self.currentIndex = startIndex
			self.noveltyList = np.zeros(len(self.artefacts))
			while self.currentIndex+1 < len(self.names) and self.times[self.currentIndex+1]==self.times[self.currentIndex]:
				self.currentIndex +=1
			
			while self.currentIndex < len(self.names):
				self.train()
				newArtefacts = [self.currentIndex+1]
				while newArtefacts[-1]+1 < len(self.names) and self.times[newArtefacts[-1]+1]==self.times[newArtefacts[0]]:
					newArtefacts.append(newArtefacts[-1]+1)
				novelties = []
				for i,a in enumerate(self.names[newArtefacts[0]:newArtefacts[-1]+1]):
					dist,cluster = self.novelty(a,normedDistance=False)
					time=self.times[self.names.index(a)]
					novelties.append((dist/self.sizes[cluster],cluster,time,a))
					self.noveltyList[self.currentIndex+i] = novelties[-1][0]
				novelties = sorted(novelties,key=lambda x: x[0])
				scales = {}
				translates = {}
				for k in self.Data.pastCalc.keys():
					if k in self.props:
						scales[k] = self.Data.pastCalc[k]['std']
						translates[k] = self.Data.pastCalc[k]['avg']
				for n in novelties[::-1]:
					cent = np.copy(self.centroids[n[1]])
					art = np.copy(self.artefacts[self.names.index(n[3])])
					c = self.clusters.predict(art)[0]
					for i,v in enumerate(self.props):
						cent[i] = np.around(cent[i] * scales[v] + translates[v],decimals=1)
						art[i] = np.around(art[i] * scales[v] + translates[v],decimals=1)
					print 'Closest cluster to',n[3],'(released',str(n[2])+') was #'+str(n[1]),'with distance',str(n[0])+'. Actual cluster was',str(c)+'.'
					if n[0] > 1:
						print 'Attrs:	  RAM	 ROM   CPU	 DDia  DWid  DLen	Wid   Len	 Dep	Vol	Mass   DPI'
						print 'Cluster:',cent
						print 'Design: ',art
						print 'Diff:   ',art-cent
				self.increment(len(newArtefacts))
Code example #30
File: grid.py Project: ivn888/karta
    def resample(self, dx, dy, method='nearest'):
        """ Resample array to have spacing `dx`, `dy'. The grid origin remains
        in the same position.

        Parameters
        ----------
        dx : float
            cell dimension 1
        dy : float
            cell dimension 2
        method : str, optional
            interpolation method, currently only 'nearest' supported
        """
        ny, nx = self.bands[0].size
        dx0, dy0 = self._transform[2:4]
        xllcenter, yllcenter = self.center_llref()

        if method == 'nearest':
            rx, ry = dx / dx0, dy / dy0
            I = np.around(np.arange(ry/2, ny, ry)-0.5).astype(int)
            J = np.around(np.arange(rx/2, nx, rx)-0.5).astype(int)
            if I[-1] == ny:
                I = I[:-1]
            if J[-1] == nx:
                J = J[:-1]
            JJ, II = np.meshgrid(J, I)
            values = self[:,:][II, JJ]
        else:
            raise NotImplementedError('method "{0}" not '
                                      'implemented'.format(method))

        t = self._transform
        tnew = (t[0], t[1], dx, dy, t[4], t[5])
        return RegularGrid(tnew, values=values, crs=self.crs,
                           nodata_value=self.nodata)
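The nearest-neighbour branch above selects source rows and columns by rounding fractional indices; the index construction on its own, with small assumed numbers, looks like this (note that np.around rounds half to even, so 0.5 maps to 0):

import numpy as np

ny, nx = 6, 8          # original grid shape (rows, cols)
dx0, dy0 = 1.0, 1.0    # original cell size
dx, dy = 2.0, 2.0      # requested cell size

rx, ry = dx / dx0, dy / dy0
I = np.around(np.arange(ry / 2, ny, ry) - 0.5).astype(int)
J = np.around(np.arange(rx / 2, nx, rx) - 0.5).astype(int)
print(I)  # [0 2 4]
print(J)  # [0 2 4 6]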
Code example #31
def get_blue_edge_vert(edge,A,B,topo_triangles):
    points = [A,B] 
    normalized_weighted_sums = []
    for point in points:
        #print point.triangles
        triangle_normals = []
        triangle_angles = []
        for key in point.triangles:
            triangle = topo_triangles[key]
            if triangle.color == 'red':
                edge_start = point.coords
                # get normals
                triangle_normals.append(triangle.normal_vec)
                # get weights
                edge_ends = [vert for vert in triangle.vert_positions if vert != edge_start]
                v1 = np.subtract(edge_ends[0],edge_start)
                v2 = np.subtract(edge_ends[1],edge_start)
                angle = angle_between(v1,v2)
                triangle_angles.append(angle)
        # calculate weighted sum
        total = sum(triangle_angles)
        triangle_weights = [angle/total for angle in triangle_angles]
        zipped = zip(triangle_normals, triangle_weights)
        weighted_angles = [ normal/weight for normal,weight in zipped]
        weighted_sum = np.sum(weighted_angles, axis=0)
        # normalize weighted sum
        length = np.linalg.norm(weighted_sum)       
        normalized_weighted_sum = np.around(weighted_sum / length, decimals=5)
        normalized_weighted_sums.append(normalized_weighted_sum)
    N,M = normalized_weighted_sums 
    AB = np.subtract(A.coords,B.coords) # A and B? or swapped somewhere else?
    # H=ABx(MxN)
    H = np.cross (AB , np.cross(M,N) )
    # h=AB•N
    h = np.dot(  AB , N)
    # k = 2(M•N)(AB•N) - 2(AB•M)
    k = (2*np.dot(M,N) * np.dot(AB,N)) - (2* np.dot(AB,M))
    #V = (A+B)/2+(h/k)H
    V = np.add(B.coords,A.coords)/2 + (h/k)*H
    #print "*********************************************"
    #print list(A.coords)
    #print list(B.coords)
    dist1 = np.linalg.norm(np.array(A.coords)-list(B.coords))
    dist2 = np.linalg.norm(V-list(A.coords))
    dist3 = np.linalg.norm(V-list(B.coords))
    
    print dist2
    if dist2 > 0.39:  # CUSTOMIZE THIS VALUE! EEK  
        print "LONG EDGE IN CHAMFER EDGE" 
        print dist2
        print dist3
        print A.coords
        print B.coords
        print V
        V = (np.array(A.coords)+ np.array(B.coords)) / 2
        #print new
    #dist = np.linalg.norm(w-list(planes[0][0]))
    #dist = np.linalg.norm(w-list(planes[0][0]))
    #print dist
    #print V
    #print dist1
    #print dist2
    #print dist3
    return V
Code example #32
print('Container resource requirements:\n', resource_Container)
print('Total container data transfer:\n', data_trans_Container)
print('Bandwidth between edge nodes:\n', Band)
print('Deployment decision matrix:\n', X)
Y = np.copy(X)
for i in range(Container_Num):
    for j in range(EN_Num):
        if X[i][j] == 1:
            resource_used_EN[j][0] += resource_Container[i][0]
            resource_used_EN[j][1] += resource_Container[i][1]
            resource_used_EN[j][2] += resource_Container[i][2]
            break
        else:
            continue
resource_remaining_EN = resource_total_EN - resource_used_EN
resource_utilization_EN = np.around(resource_used_EN / resource_total_EN,
                                    decimals=2)
for i in range(EN_Num):
    load_EN[i] = (resource_utilization_EN[i][0] + resource_utilization_EN[i][1]
                  + resource_utilization_EN[i][2]) / 3
for i in range(EN_Num):
    for j in range(EN_Num):
        load_differentiation_EN[i][j] = load_EN[i] / load_EN[j]
load_differentiation_EN = np.around(load_differentiation_EN, decimals=2)
print('Edge node resources used:\n', resource_used_EN)
print('Edge node resources remaining:\n', resource_remaining_EN)
print('Edge node resource utilization:\n', resource_utilization_EN)
print('Edge node load:\n', load_EN)
print('Load differentiation between edge nodes:\n', load_differentiation_EN)


# Load balancing check
Code example #33
File: linkprediction_countries3.py Project: IBM/LOA
    #params = body2.alpha['default'].get_params().detach().numpy()
    #print("2nd predicate: " + str(params))
    beta, weights = body2.alpha['default'].get_params()
    print("2nd predicate: " + str(beta.item()) + " " +
          str(weights.detach().numpy()))
    np.set_printoptions(precision=3, suppress=True)
    #params = body1.alpha['default'].get_params().detach().numpy()
    #print("3nd predicate: " + str(params))
    beta, weights = body1.alpha['default'].get_params()
    print("3nd predicate: " + str(beta.item()) + " " +
          str(weights.detach().numpy()))

    lnn_beta, lnn_wts, slacks = join.AND['default'].cdd()
    np.set_printoptions(precision=3, suppress=True)
    print("LNN beta, weights: " + \
          str(np.around(lnn_beta.item(), decimals=3)) + " " + str(lnn_wts.detach().numpy()))

    dfs_test = load_metadata(background_fname)
    load_data(facts_fname_test, dfs_test)

    labels_df_test = dfs_test[target]
    labels_df_test.columns = [attr_name + "0", attr_name + "3"]
    print("read test data")

    yhat = score(proj, batch_size)
    print("done evaluation (" + str(time.time() - begtime) + "s)")

    test_countries = list(
        set(labels_df_test[[attr_name + "0"
                            ]].to_numpy().transpose()[0].tolist()))
    test_regions = list(
Code example #34
        4: None,
        5: None,
        6: None,
        7: None,
        8: None,
        9: None,
        10: None
    }

    end_index = 0
    start_index = 0
    count = 1
    # starting point
    start_point = None
    for val in cal_pt_cloud:
        val = np.around(val, decimals=5)
        if start_point == None:
            # case 1: start_point is none
            start_point = val[2:4].tolist()
        else:
            # case 2: start_point isn't none
            # calculate the difference between x and y
            x_diff = abs(val[2] - start_point[0])
            y_diff = abs(val[3] - start_point[1])
            if x_diff > 0.06 or y_diff > 0.06:
                # by observation, the minimum between each marker point
                # is 0.6.
                # case: reach a new cluster for marker
                # cluster = cal_pt_cloud[start_index:end_index, 0:2]
                storage[count] = start_index, end_index
                count += 1
Code example #35
K = np.array([2, 3, 4, 5])
K_e = 3  # np.random.choice(K)

s = np.cos(2 * math.pi * f0 * (np.arange(fd)) / fd)

# print(s)

s1 = s[0::K_e]

# print(s1)

# print(s)
# print(abs(np.fft.fft(s1)))

m1, mi = abs(np.fft.fft(s1)).max(0), np.argmax(
    np.around(abs(np.fft.fft(s1)), decimals=5)) + 1
# print(np.argmax(abs(np.fft.fft(s1)))+1)
# print(mi)

fn_et = fd * mi / len(s1)
# print(fn_et)
#
# plt.stem(abs(np.fft.fft(s)))
# plt.show()
# plt.close()
# plt.stem(abs(np.fft.fft(s1)))
# plt.show()
# plt.close()

T = 10
fe = 0
Code example #36
def ecc_plot(aryMean, vecEccBin, strPathOut):
    """
    Plot results for eccentricity & cortical depth analysis.

    This version plots the values using two separate colourmaps for negative
    and positive values.

    Plots statistical parameters (e.g. parameter estimates) by cortical depth
    (x-axis) and pRF eccentricity (y-axis). This function is part of a tool for
    analysis of cortical-depth-dependent fMRI responses at different
    retinotopic eccentricities.
    """
    # Number of eccentricity bins:
    varEccNum = vecEccBin.shape[0]

    # Font type:
    strFont = 'Liberation Sans'

    # Font colour:
    vecFontClr = np.array([17.0/255.0, 85.0/255.0, 124.0/255.0])

    # Find minimum and maximum correlation values:
    varMin = np.percentile(aryMean, 2.5)
    varMax = np.percentile(aryMean, 97.5)

    # Round:
    varMin = (np.floor(varMin * 0.1) / 0.1)
    varMax = (np.ceil(varMax * 0.1) / 0.1)

    # Same scale for negative and positive colour bar:
    if np.greater(np.absolute(varMin), varMax):
        varMax = np.absolute(varMin)
    else:
        varMin = np.multiply(-1.0, np.absolute(varMax))

    # Fixed axis limits for comparing plots across conditions/ROIs:
    # varMin = -400.0
    # varMax = 400.0

    # Create main figure:
    fig01 = plt.figure(figsize=(4.0, 3.0),
                       dpi=200.0,
                       facecolor=([1.0, 1.0, 1.0]),
                       edgecolor=([1.0, 1.0, 1.0]))

    # Big subplot in the background for common axes labels:
    axsCmn = fig01.add_subplot(111)

    # Turn off axis lines and ticks of the big subplot:
    axsCmn.spines['top'].set_color('none')
    axsCmn.spines['bottom'].set_color('none')
    axsCmn.spines['left'].set_color('none')
    axsCmn.spines['right'].set_color('none')
    axsCmn.tick_params(labelcolor='w',
                       top=False,
                       bottom=False,
                       left=False,
                       right=False)

    # Set and adjust common axes labels:
    axsCmn.set_xlabel('Cortical depth',
                      alpha=1.0,
                      fontname=strFont,
                      fontweight='normal',
                      fontsize=7.0,
                      color=vecFontClr,
                      position=(0.5, 0.0))
    axsCmn.set_ylabel('pRF eccentricity',
                      alpha=1.0,
                      fontname=strFont,
                      fontweight='normal',
                      fontsize=7.0,
                      color=vecFontClr,
                      position=(0.0, 0.5))
    axsCmn.set_title('fMRI signal change',
                     alpha=1.0,
                     fontname=strFont,
                     fontweight='bold',
                     fontsize=10.0,
                     color=vecFontClr,
                     position=(0.5, 1.1))

    # Create colour-bar axis:
    axsTmp = fig01.add_subplot(111)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    # Number of colour increments:
    varNumClr = 20

    # Colour values for the first colormap (used for negative values):
    aryClr01 = plt.cm.PuBu(np.linspace(0.1, 1.0, varNumClr))

    # Invert the first colour map:
    aryClr01 = np.flipud(np.array(aryClr01, ndmin=2))

    # Colour values for the second colormap (used for positive values):
    aryClr02 = plt.cm.OrRd(np.linspace(0.1, 1.0, varNumClr))

    # Combine negative and positive colour arrays:
    aryClr03 = np.vstack((aryClr01, aryClr02))

    # Create new custom colormap, combining two default colormaps:
    objCustClrMp = colors.LinearSegmentedColormap.from_list('custClrMp',
                                                            aryClr03)

    # Lookup vector for negative colour range:
    vecClrRngNeg = np.linspace(varMin, 0.0, num=varNumClr)

    # Lookup vector for positive colour range:
    vecClrRngPos = np.linspace(0.0, varMax, num=varNumClr)

    # Stack lookup vectors:
    vecClrRng = np.hstack((vecClrRngNeg, vecClrRngPos))

    # 'Normalize' object, needed to use custom colour maps and lookup table
    # with matplotlib:
    objClrNorm = colors.BoundaryNorm(vecClrRng, objCustClrMp.N)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    # Plot correlation coefficients of current depth level:
    pltTmpCorr = plt.imshow(aryMean,
                            interpolation='nearest',  # 'none',  # 'bicubic',
                            origin='lower',
                            norm=objClrNorm,
                            cmap=objCustClrMp)

    # Position of labels for the x-axis:
    vecXlblsPos = np.array([0, (aryMean.shape[1] - 1)])
    # Set position of labels for the x-axis:
    axsTmp.set_xticks(vecXlblsPos)
    # Create list of strings for labels:
    lstXlblsStr = ['WM', 'CSF']
    # Set the content of the labels (i.e. strings):
    axsTmp.set_xticklabels(lstXlblsStr,
                           alpha=0.9,
                           fontname=strFont,
                           fontweight='bold',
                           fontsize=8.0,
                           color=vecFontClr)

    # Position of labels for the y-axis:
    vecYlblsPos = np.arange(-0.5, (varEccNum - 0.5), 1.0)
    # Set position of labels for the y-axis:
    axsTmp.set_yticks(vecYlblsPos)
    # Create list of strings for labels:
    # lstYlblsStr = map(str,
    #                   np.around(vecEccBin, decimals=1)
    #                   )
    lstYlblsStr = [str(x) for x in np.around(vecEccBin, decimals=1)]
    # Set the content of the labels (i.e. strings):
    axsTmp.set_yticklabels(lstYlblsStr,
                           alpha=0.9,
                           fontname=strFont,
                           fontweight='bold',
                           fontsize=8.0,
                           color=vecFontClr)

    # Turn off ticks:
    axsTmp.tick_params(labelcolor=([0.0, 0.0, 0.0]),
                       top=False,
                       bottom=False,
                       left=False,
                       right=False)

    # We create invisible axes for the colour bar slightly to the right of the
    # position of the last data-axes. First, retrieve position of last
    # data-axes:
    objBbox = axsTmp.get_position()
    # We slightly adjust the x-position of the colour-bar axis, by shifting
    # them to the right:
    vecClrAxsPos = np.array([(objBbox.x0 * 7.5),
                             objBbox.y0,
                             objBbox.width,
                             objBbox.height])
    # Create colour-bar axis:
    axsClr = fig01.add_axes(vecClrAxsPos,
                            frameon=False)

    # Add colour bar:
    pltClrbr = fig01.colorbar(pltTmpCorr,
                              ax=axsClr,
                              fraction=1.0,
                              shrink=1.0)

    # The values to be labeled on the colour bar:
    # vecClrLblsPos01 = np.arange(varMin, 0.0, 10)
    # vecClrLblsPos02 = np.arange(0.0, varMax, 100)
    vecClrLblsPos01 = np.linspace(varMin, 0.0, num=3)
    vecClrLblsPos02 = np.linspace(0.0, varMax, num=3)
    vecClrLblsPos = np.hstack((vecClrLblsPos01, vecClrLblsPos02))

    # The labels (strings):
    # vecClrLblsStr = map(str, vecClrLblsPos)
    vecClrLblsStr = [str(x) for x in vecClrLblsPos]

    # Set labels on coloubar:
    pltClrbr.set_ticks(vecClrLblsPos)
    pltClrbr.set_ticklabels(vecClrLblsStr)
    # Set font size of colour bar ticks, and remove the 'spines' on the right
    # side:
    pltClrbr.ax.tick_params(labelsize=8.0,
                            tick2On=False)

    # Make colour-bar axis invisible:
    axsClr.axis('off')

    # Save figure:
    fig01.savefig(strPathOut,
                  dpi=160.0,
                  facecolor='w',
                  edgecolor='w',
                  orientation='landscape',
                  bbox_inches='tight',
                  pad_inches=0.2,
                  transparent=False,
                  frameon=None)
Code example #37
def video_frame(frame):
    global cnt
    # Initialize the frame size for drone adjustment
    if drone.frameWidth == 0:
        drone.frameWidth = numpy.size(frame, 1)
    if drone.frameHeight == 0:
        drone.frameHeight = numpy.size(frame, 0)

    # Initialize variables to compare the current frame to
    if drone.thisFrame is None:
        drone.lastFrame = frame
    else:
        drone.lastFrame = drone.thisFrame
    drone.thisFrame = frame

    # # Convert frames to grayscale and blur them
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # gray = cv2.GaussianBlur(gray, (21, 21), 0)
    #
    # grayLastFrame = cv2.cvtColor(drone.lastFrame, cv2.COLOR_BGR2GRAY)
    # grayLastFrame = cv2.GaussianBlur(grayLastFrame, (21, 21), 0)
    #
    # # compute the absolute difference between the current frame and the last frame
    # frameDelta = cv2.absdiff(grayLastFrame, gray)

    ret, thresh = cv2.threshold(frame, 127, 255, cv2.THRESH_BINARY)
    # edges = cv2.cvtColor(edges, cv2.COLOR_BGR2GRAY)

    # Find edges after motion detection
    edges = cv2.Canny(thresh, drone.minEdgeVal, drone.maxEdgeVal)

    if drone.pictureBoolean:
        drone.pictureBoolean = False
        cv2.imwrite("saved_image.jpg", edges)

    # Find sphero using circles
    if drone.findSphero:
        # Find circles after detecting edges
        circles = cv2.HoughCircles(edges,
                                   cv2.HOUGH_GRADIENT,
                                   1.2,
                                   5,
                                   param1=50,
                                   param2=30,
                                   minRadius=drone.minCircleRadius,
                                   maxRadius=drone.maxCircleRadius)
        # circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1.2, 10, minRadius=drone.minCircleRadius, maxRadius=drone.maxCircleRadius)

        if circles is not None:
            circles = numpy.uint16(numpy.around(circles))
            listX = []
            listY = []
            listR = []

            for i in circles[0, :]:
                # # draw the outer circle
                # cv2.circle(edges, (i[0], i[1]), i[2], (255, 255, 255), 2)
                # # draw the center of the circle
                # cv2.circle(edges, (i[0], i[1]), 2, (255, 255, 255), 3)

                # Save the centers and radii
                listX.append(i[0])
                listY.append(i[1])
                listR.append(i[2])
                # print("Edges circle center at: " + str(i[0]) + ", " + str(i[1]))

            # Sort the centers and radii and print/draw the median
            sortedX = mergeSort(listX)
            sortedY = mergeSort(listY)
            sortedR = mergeSort(listR)

            medianX = sortedX[len(sortedX) // 2]
            medianY = sortedY[len(sortedY) // 2]
            medianR = sortedR[len(sortedR) // 2]

            drone.objectCenterX = medianX
            drone.objectCenterY = medianY

            cv2.circle(edges, (medianX, medianY), medianR, (255, 255, 255), 2)
            cv2.circle(edges, (medianX, medianY), 2, (255, 255, 255), 2)
            # print("Median edges circle center: " + str(medianX) + ", " + str(medianY) + " with radius " + str(medianR))

            drone.sinceLastSphero = 0
            drone.foundCircle = True
        else:
            # Fake a circle in the center if none found
            drone.objectCenterX = drone.frameWidth >> 1
            drone.objectCenterY = drone.frameHeight >> 1

            drone.sinceLastSphero += 1
            drone.foundCircle = False
    else:
        # Fake a circle in the center if none found
        drone.objectCenterX = drone.frameWidth >> 1
        drone.objectCenterY = drone.frameHeight >> 1
        drone.foundCircle = False

    # Find sphero using blobs if no circles found
    if drone.findSphero and not drone.foundCircle:
        kernel = numpy.ones((5, 5), numpy.uint8)
        edges = cv2.dilate(edges, kernel, iterations=1)
        edges = cv2.erode(edges, kernel, iterations=1)

        params = cv2.SimpleBlobDetector_Params()

        # Filter by Circularity
        # params.filterByCircularity = True
        # params.minCircularity = 0.6

        # Filter by Area.
        # params.filterByArea = True
        # params.minArea = 16

        detector = cv2.SimpleBlobDetector_create(params)
        keypoints = detector.detect(edges)

        if keypoints is not None:
            listX = []
            listY = []
            listR = []

            for keypoint in keypoints:
                # # draw the outer circle
                # cv2.circle(edges, (i[0], i[1]), i[2], (255, 255, 255), 2)
                # # draw the center of the circle
                # cv2.circle(edges, (i[0], i[1]), 2, (255, 255, 255), 3)

                # Save the centers and radii
                # print point.pt[0]
                listX.append(int(keypoint.pt[0]))
                listY.append(int(keypoint.pt[1]))
                listR.append(int(keypoint.size / 2))
                # print("Edges circle center at: " + str(i[0]) + ", " + str(i[1]))
                # print keypoint.pt

            if len(listX) > 0 and len(listY) > 0 and len(listR) > 0:
                # Sort the centers and radii and print/draw the median
                sortedX = mergeSort(listX)
                sortedY = mergeSort(listY)
                sortedR = mergeSort(listR)

                # print sortedX
                medianX = sortedX[len(sortedX) // 2]
                medianY = sortedY[len(sortedY) // 2]
                medianR = sortedR[len(sortedR) // 2]

                drone.objectCenterX = medianX
                drone.objectCenterY = medianY

                cv2.circle(edges, (medianX, medianY), medianR, (255, 255, 255),
                           2)
                cv2.circle(edges, (medianX, medianY), 2, (255, 255, 255), 2)
                # print("Median edges circle center: " + str(medianX) + ", " + str(medianY) + " with radius " + str(medianR))

                drone.sinceLastSphero = 0
            else:
                # Fake a circle in the center if none found
                drone.objectCenterX = drone.frameWidth >> 1
                drone.objectCenterY = drone.frameHeight >> 1

                drone.sinceLastSphero += 1
        else:
            # Fake a circle in the center if none found
            drone.objectCenterX = drone.frameWidth >> 1
            drone.objectCenterY = drone.frameHeight >> 1

            drone.sinceLastSphero += 1
    elif not drone.foundCircle:
        # Fake a circle in the center if none found
        drone.objectCenterX = drone.frameWidth >> 1
        drone.objectCenterY = drone.frameHeight >> 1
        drone.sinceLastSphero = 0

    cnt += 1
    cv2.imshow("Drone", frame)
    # cv2.imshow("Motion Detection", frameDelta)
    cv2.imshow("Threshold Edges", edges)
    cv2.imshow("Threshold", thresh)
    cv2.waitKey(10)
Code example #38
w_list = []
for T in T_list:
    w_t = w_0
    for t in range(T):
        w_t = w_t.dot(M)
    w_list.append(w_t) 


# In[7]:


dt = pd.DataFrame(np.arange(1, 26))
for i in range(len(w_list)):
    index = np.argsort(w_list[i])[::-1][:25]
    name = team_name.iloc[index].reset_index(drop=True)
    score = np.around(w_list[i][index], 5)
    dt['Team_name'+str(T_list[i])] = name
    dt['Team_score'+str(T_list[i])] = score
dt.to_excel('ranking.xlsx')


# In[13]:


e_value, e_vector = eigs(M.T, 1)
u = e_vector.T[0]


# In[14]:

Code example #39
File: store_hpm_pc (project not given in source)
def store_hpm_pc(folder, out, binsize=5, NAN=True, to_csv=False):
    # TODO: CAREFUL FOR SESSIONS THAT ARE TOO SHORT
    """ Saves pd.DataFrame as hdf5 (DEFAULT), which contains 3 matrices (NAN default):
        hpm: animal day, session, window 0-14, detailed HPM matrix
        pc: animal day, session, window 0-14, detailed PC matrix
        summary: animal, day, session, summary metrics (1 value each session)
        NOTE: data keeps four decimals for PC, 2 decimals for hpm
    """
    # GAIN as percentage increase
    processed = os.path.join(folder, 'processed')
    animals, days, sessions, windowshpm, windowsPC = [], [], [], [], []
    sum_animals, sum_days, sum_sessions, maxPCs, maxHPMs, tPCs, tHPMs, \
    PCgains, HPMgains = [], [], [], [], [], [], [], [], []
    exceptions = []
    misaligned = []
    nofile = []
    if NAN:
        results = {}
        maxWindow = 0
        nsession = 0
        for animal in get_all_animals(processed):
            results[animal] = {}
            animal_path = os.path.join(processed, animal)
            ds = get_animal_days(animal_path)
            for d in ds:
                try:
                    _, (hpm, totalHPM, HPMgain), (pc, cumuPC, totalPC,
                                                  PCgain), _ = learning_params(
                                                      folder,
                                                      animal,
                                                      d,
                                                      bin_size=binsize,
                                                      total=2)
                except KeyError:
                    exceptions.append((animal, d))
                    tPCs.append(np.nan)
                    tHPMs.append(np.nan)
                    maxHPMs.append(np.nan)
                    maxPCs.append(np.nan)
                    PCgains.append(np.nan)
                    HPMgains.append(np.nan)
                    hpm, pc, cumuPC = [np.nan], [np.nan], [np.nan]
                    continue
                except IndexError:
                    misaligned.append((animal, d))
                    tPCs.append(np.nan)
                    tHPMs.append(np.nan)
                    maxHPMs.append(np.nan)
                    maxPCs.append(np.nan)
                    PCgains.append(np.nan)
                    HPMgains.append(np.nan)
                    hpm, pc, cumuPC = [np.nan], [np.nan], [np.nan]
                    continue
                except OSError:
                    print(f'cannot open {animal} {d}')
                    nofile.append((animal, d))
                    tPCs.append(np.nan)
                    tHPMs.append(np.nan)
                    maxHPMs.append(np.nan)
                    maxPCs.append(np.nan)
                    PCgains.append(np.nan)
                    HPMgains.append(np.nan)
                    hpm, pc, cumuPC = [np.nan], [np.nan], [np.nan]
                    continue
                hpm = np.around(hpm, 2)
                pc = np.around(pc, 4)
                cumuPC = np.around(cumuPC, 4)
                totalHPM, HPMgain = np.around((totalHPM, HPMgain), 2)
                totalPC, PCgain = np.around((totalPC, PCgain), 4)
                maxHPM = np.nanmax(hpm) if len(hpm) else np.nan
                maxPC = np.nanmax(pc) if len(pc) else np.nan
                tPCs.append(totalPC)
                tHPMs.append(totalHPM)
                maxHPMs.append(maxHPM)
                maxPCs.append(maxPC)
                PCgains.append(PCgain)
                HPMgains.append(HPMgain)

                results[animal][d] = {'hpm': hpm, 'pc': pc, 'cumuPC': cumuPC}
                maxWindow = max(maxWindow, len(hpm))
                nsession += 1
            sum_animals.append([animal] * len(ds))
            sum_sessions.append(np.arange(len(ds)))
            sum_days.append(ds)
        animals, days, sessions, windowshpm, windowsPC, windows_cumuPC = [], [], [], [], [], []
        for animal in results:
            animals.append([animal] * len(results[animal]))
            days.append(sorted(results[animal].keys()))
            sessions.append(np.arange(len(results[animal])))
            windowshpm.append(
                np.vstack([
                    np.concatenate(
                        (results[animal][d]['hpm'], [np.nan] *
                         (maxWindow - len(results[animal][d]['hpm']))))
                    for d in sorted(results[animal])
                ]))
            windowsPC.append(
                np.vstack([
                    np.concatenate(
                        (results[animal][d]['pc'], [np.nan] *
                         (maxWindow - len(results[animal][d]['pc']))))
                    for d in sorted(results[animal])
                ]))
            windows_cumuPC.append(
                np.vstack([
                    np.concatenate(
                        (results[animal][d]['cumuPC'], [np.nan] *
                         (maxWindow - len(results[animal][d]['cumuPC']))))
                    for d in sorted(results[animal])
                ]))
        windowshpm = np.vstack(windowshpm)
        windowsPC = np.vstack(windowsPC)
        windows_cumuPC = np.vstack(windows_cumuPC)

    else:
        # TODO: NOT VALIDATED YET
        windows = []
        for animal in get_all_animals(processed):
            animal_path = os.path.join(processed, animal)
            winTot = 0
            ds = get_animal_days(animal_path)
            for i, d in enumerate(ds):
                _, (hpm, totalHPM,
                    HPMgain), (pc, totalPC,
                               PCgain), _ = learning_params(folder,
                                                            animal,
                                                            d,
                                                            bin_size=binsize,
                                                            total=True)
                hpm = np.around(hpm, 2)
                pc = np.around(pc, 4)
                totalHPM, HPMgain = np.around((totalHPM, HPMgain), 2)
                totalPC, PCgain = np.around((totalPC, PCgain), 4)
                maxHPM = np.nanmax(hpm)
                maxPC = np.nanmax(pc)
                tPCs.append(totalPC)
                tHPMs.append(totalHPM)
                maxHPMs.append(maxHPM)
                maxPCs.append(maxPC)
                PCgains.append(PCgain)
                HPMgains.append(HPMgain)

                windowshpm.append(hpm)
                windowsPC.append(pc)
                winN = len(hpm)
                windows.append(np.arange(winN))
                days.append(winN * [d])
                sessions.append(winN * [i])
                winTot += winN
            animals.append([animal] * winTot)

            sum_animals.append([animal] * len(ds))
            sum_sessions.append(np.arange(len(ds)))
            sum_days.append(ds)
        windows = np.concatenate(windows)
        windowshpm = np.concatenate(windowshpm)
        windowsPC = np.concatenate(windowsPC)

    animals = np.concatenate(animals)
    days = np.concatenate(days)
    sessions = np.concatenate(sessions)

    sum_animals = np.concatenate(sum_animals)
    sum_sessions = np.concatenate(sum_sessions)
    sum_days = np.concatenate(sum_days)

    if NAN:
        hpmdict = {'animal': animals, 'day': days, 'session': sessions}
        hpmdict.update({
            f'window {i}': windowshpm[:, i]
            for i in range(windowshpm.shape[1])
        })
        HPMS = pd.DataFrame(hpmdict)
        PCdict = {'animal': animals, 'day': days, 'session': sessions}
        PCdict.update({
            f'window {i}': windowsPC[:, i]
            for i in range(windowsPC.shape[1])
        })
        PCS = pd.DataFrame(PCdict)
        cumuPCdict = {'animal': animals, 'day': days, 'session': sessions}
        cumuPCdict.update({
            f'window {i}': windows_cumuPC[:, i]
            for i in range(windows_cumuPC.shape[1])
        })
        cumuPCS = pd.DataFrame(cumuPCdict)
        if to_csv:
            HPMS.to_csv(os.path.join(out,
                                     f'learning_stats_HPM_bin_{binsize}.csv'),
                        index=False)
            PCS.to_csv(os.path.join(out,
                                    f'learning_stats_PC_bin_{binsize}.csv'),
                       index=False)
            cumuPCS.to_csv(os.path.join(
                out, f'learning_stats_cumuPC_bin_{binsize}.csv'),
                           index=False)
        else:
            fname = os.path.join(out, f'learning_stats_bin_{binsize}_NAN.hdf5')
            HPMS.to_hdf(fname, key='hpm', index=False)
            PCS.to_hdf(fname, key='PC', index=False)
            cumuPCS.to_hdf(fname, key='cumuPC', index=False)  # mirror the CSV branch, which also writes cumuPC
    else:
        PDF = pd.DataFrame({
            'animal': animals,
            'day': days,
            'session': sessions,
            'window': windows,
            'PC': windowsPC,
            'HPM': windowshpm
        })
        if to_csv:
            PDF.to_csv(os.path.join(
                out, f'learning_stats_detail_bin_{binsize}.csv'),
                       index=False)
        else:
            fname = os.path.join(out,
                                 f'learning_stats_bin_{binsize}_NONAN.hdf5')
            PDF.to_hdf(fname, key='detail', index=False)
    sumPDF = pd.DataFrame({
        'animal': sum_animals,
        'day': sum_days,
        'session': sum_sessions,
        'maxPC': maxPCs,
        'maxHPM': maxHPMs,
        'totalPC': tPCs,
        'totalHPM': tHPMs,
        'PC_gain': PCgains,
        'HPM_gain': HPMgains
    })
    if to_csv:
        sumPDF.to_csv(os.path.join(
            out, f'learning_stats_summary_bin_{binsize}.csv'),
                      index=False)
    else:
        sumPDF.to_hdf(fname, key='summary', index=False)
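A minimal sketch of the NaN-padding step used in store_hpm_pc above: per-session arrays of different lengths (hypothetical values here) are rounded, padded with NaN up to the longest session, and stacked row-wise.

import numpy as np

sessions = [np.array([1.234567, 2.345678]), np.array([3.1, 4.2, 5.3])]
max_window = max(len(s) for s in sessions)

# round, pad each session with NaN up to the longest one, then stack row-wise
padded = np.vstack([
    np.concatenate((np.around(s, 4), [np.nan] * (max_window - len(s))))
    for s in sessions
])
# rows: [1.2346, 2.3457, nan] and [3.1, 4.2, 5.3]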
コード例 #40
0
Y_test = np.zeros((len(test_labels), 10))  # one-hot target matrix for the test labels
i = 0
for digit in test_labels:
    Y_test[i][digit] = 1
    i += 1

print("====== Start Training ======")
cost = 1
# i = 0
while(cost > 900):
    cost = neural_network.train(X_train, Y_train, 100)
    # i += 1
    # if i == 1:
    print("(Training...) Cost: ", cost)
        # i = 0
cost = neural_network.train(X_train, Y_train, 1000)
print("(Training...) Cost: ", cost)

print("")
print("===============")
print("Testing....")
print("---------------")

output = neural_network.feedforward(X_test)

rounded_output = np.around(output)
cost = np.sum((rounded_output - Y_test)**2)
print("\ncost: ", cost)


neural_network.save("digits_recognition")
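The label-encoding loop above can be expressed with np.eye instead; this is only a sketch with made-up labels, assuming test_labels holds integer class indices 0-9.

import numpy as np

test_labels = np.array([3, 0, 7])      # hypothetical labels
Y_test = np.eye(10)[test_labels]       # one row per sample, 1 at the label's index

# round the network output and count squared differences, as in the test above
output = np.array([[0.1] * 10, [0.9] + [0.0] * 9, [0.2] * 10])
cost = np.sum((np.around(output) - Y_test) ** 2)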
コード例 #41
0
def datapro():
    filepath = 'D:/zq/OneDrive/experiments/2019/20191013/lipcontrol/data3/'
    files = os.listdir(filepath)
    for file in files:
        pattern = re.compile(r'\d+')
        res = re.findall(pattern, file)
        if len(res) == 3 and int(res[1]) >= 0 and int(res[1]) < 960:
            filename = filepath + file
            rawdata = np.memmap(filename, dtype=np.float32, mode='r')
            dataI = []
            dataQ = []
            dataC1 = []
            dataC2 = []
            for channelID in range(0, 8):
                fc = 17350 + 700 * channelID
                data = butter_bandpass_filter(rawdata, fc - 100, fc + 100,
                                              48000)
                f = fc
                I = getI(data, f)
                I = move_average_overlap(I)
                Q = getQ(data, f)
                Q = move_average_overlap(Q)
                decompositionQ = seasonal_decompose(Q,
                                                    freq=10,
                                                    two_sided=False)
                trendQ = decompositionQ.trend
                decompositionI = seasonal_decompose(I,
                                                    freq=10,
                                                    two_sided=False)
                trendI = decompositionI.trend
                trendI = trendI[480:]
                trendQ = trendQ[480:]
                difftrendI = []
                for i in range(5, len(trendI)):
                    difftrendI.append((trendI[i] - trendI[i - 5]) * 1000)
                difftrendQ = []
                for i in range(5, len(trendQ)):
                    difftrendQ.append((trendQ[i] - trendQ[i - 5]) * 1000)
                difftrendI = medfilter(difftrendI)
                difftrendI = medfilter(difftrendI)
                difftrendQ = medfilter(difftrendQ)
                difftrendQ = medfilter(difftrendQ)
                difftrendI = np.around(difftrendI, decimals=6)
                difftrendQ = np.around(difftrendQ, decimals=6)
                # datachord1, datachord2 = chord_extract(trendI, trendQ)
                # plt.figure()
                # plt.plot(datachord2)
                # plt.show()
                if len(dataI):
                    dataI = np.vstack((dataI, difftrendI))
                    dataQ = np.vstack((dataQ, difftrendQ))
                    # dataC1 = np.vstack((dataC1, datachord1))
                    # dataC2 = np.vstack((dataC2, datachord2))
                else:
                    dataI = difftrendI
                    dataQ = difftrendQ
                    # dataC1 = datachord1
                    # dataC2 = datachord2
            dataIQ = np.vstack((dataI, dataQ))
            dataIQ = np.transpose(dataIQ)
            # dataC = np.vstack((dataC1, dataC2))
            # dataC = np.transpose(dataC)
            np.savez_compressed('./data/lipcontrol/cutdata12/datapre%d-%d-%d' %
                                (int(res[0]), int(res[1]), int(res[2])),
                                datapre=dataIQ)
コード例 #42
0
def pprint(seq):
    seq = np.array(seq)
    dim = (seq.shape[1] - 1) // 2
    seq = np.char.mod('%d', np.around(seq))
    seq[:, dim:dim + 1] = ' '
    print("\n".join(["".join(x) for x in seq.tolist()]))
コード例 #43
0
     m12eff = meff(runtodo, time, r12)
     m12dmslist.append(m12eff['Seff'] + m12eff['DMeff'])
     ms02list.append(m02['Seff'])
     Selist.append(eff['Seff'])
     DMelist.append(eff['DMeff'])
     Gelist.append(eff['Geff'])
     Mvirlist.append(eff['mvir'])
     Mstarlist.append(eff['mstar'])
 m12dmslist = np.array(m12dmslist)
 ms02list = np.array(ms02list)
 Selist = np.array(Selist)
 DMelist = np.array(DMelist)
 Gelist = np.array(Gelist)
 Mvirlist = np.array(Mvirlist)
 Mstarlist = np.array(Mstarlist)
 Mvirround = np.around(Mvirlist / 1e10, decimals=1)
 Mvirstr = Mvirstr + '&      $' + str(Mvirround[0]) + '$& $\,_{' + str(
     Mvirround[1]) + '}^{' + str(Mvirround[2]) + '} $'
 MsMvround = np.around(Mstarlist * 1e3 / Mvirlist, decimals=2)
 MsMvstr = MsMvstr + '&      $' + str(MsMvround[0]) + '$& $\,_{' + str(
     MsMvround[1]) + '}^{' + str(MsMvround[2]) + '} $'
 Ms02Mvround = np.around(ms02list * 1e3 / Mvirlist, decimals=2)
 Ms02Mvstr = Ms02Mvstr + '&      $' + str(
     Ms02Mvround[0]) + '$& $\,_{' + str(Ms02Mvround[1]) + '}^{' + str(
         Ms02Mvround[2]) + '} $'
 Ms02round = np.around(ms02list / 1e8, decimals=2)
 Ms02str = Ms02str + '&      $' + str(Ms02round[0]) + '$& $\,_{' + str(
     Ms02round[1]) + '}^{' + str(Ms02round[2]) + '} $'
 Meffround = np.around((Selist + DMelist) / 1e8, decimals=2)
 Meffstr = Meffstr + '&      $' + str(Meffround[0]) + '$& $\,_{' + str(
     Meffround[1]) + '}^{' + str(Meffround[2]) + '} $'
コード例 #44
0
        Ventricles_ICV_forecast[i, :, 0] = most_recent_Ventricles_ICV[i]
        Ventricles_ICV_forecast[i, :, 1] = most_recent_Ventricles_ICV[
            i] - Ventricles_ICV_default_50pcMargin
        Ventricles_ICV_forecast[i, :, 2] = most_recent_Ventricles_ICV[
            i] + Ventricles_ICV_default_50pcMargin
    else:
        # Subject has no imaging history, so we'll take a typical
        # ventricles volume of 25000 & wide confidence interval +/-20000
        Ventricles_ICV_forecast[i, :, 0] = Ventricles_ICV_typical
        Ventricles_ICV_forecast[
            i, :, 1] = Ventricles_ICV_typical - Ventricles_ICV_broad_50pcMargin
        Ventricles_ICV_forecast[
            i, :, 2] = Ventricles_ICV_typical + Ventricles_ICV_broad_50pcMargin

Ventricles_ICV_forecast = np.around(
    1e9 * Ventricles_ICV_forecast,
    0) / 1e9  # round to 9 decimal places to match MATLAB equivalent

## Now construct the forecast spreadsheet and output it.
print('Constructing the output spreadsheet {0} ...'.format(outputFile))
submission_table = pd.DataFrame()
# * Repeated matrices - compare with submission template
submission_table['RID'] = D2_SubjList.repeat(nForecasts)
submission_table['ForecastMonth'] = np.tile(range(1, nForecasts + 1),
                                            (N_D2, 1)).flatten()
# * Submission dates - compare with submission template
startDate = dt.datetime(2010, 5, 1)
endDate = startDate + relativedelta(months=+nForecasts - 1)
ForecastDates = [startDate]
while ForecastDates[-1] < endDate:
    ForecastDates.append(ForecastDates[-1] + relativedelta(months=+1))
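For the rounding above, scaling by 1e9, rounding to whole numbers, and dividing back is numerically equivalent (up to float precision) to rounding to 9 decimal places directly; a purely illustrative check with made-up values:

import numpy as np

x = np.array([0.1234567894, 0.9876543216])
assert np.allclose(np.around(1e9 * x, 0) / 1e9, np.around(x, 9))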
コード例 #45
0
def main():
    # getting the data from the CSV file
    data = np.genfromtxt("HW_2171_KMEANS_DATA__v502.csv",
                         delimiter=",",
                         skip_header=True)

    n = len(data)  # the total number of data points in the given dataset
    num_of_clusters = []  # list of the numbers of clusters tried
    cluster_sse = []  # list of SSEs (sum of squared errors), one entry per number of clusters
    k = 1  # initializing the number of clusters k to 1

    # we run this loop for number of clusters k = 1 to 20
    while k <= 20:
        # dictionary of clusters for the current k:
        # cluster number -> [centroid, data points in cluster, sse]
        clusters = {}
        old_centroids = []  # current centroids of the clusters
        num_of_clusters.append(k)  # record the current value of k

        # for every cluster in k, appending the initial seed points to the clusters dictionary as well as the
        # list of current centroids of the clusters
        for i in range(0, k):
            clusters[i] = [data[i + 10], [], None]
            old_centroids.append(data[i + 10])

        for _ in range(0, 1000):

            # computing the distance of every point to the centroids of each cluster and assigning the data point
            # to the cluster with the minimum distance to its centroid
            for j in range(0, n):
                minimum_distance = math.inf
                cluster = math.nan
                for l in range(0, len(clusters)):
                    distance = math.sqrt(
                        math.pow(data[j][0] - clusters[l][0][0], 2) +
                        math.pow(data[j][1] - clusters[l][0][1], 2) +
                        math.pow(data[j][2] - clusters[l][0][2], 2))
                    if distance < minimum_distance:
                        minimum_distance = distance
                        cluster = l
                clusters[cluster][1].append(data[j])

            new_centroids = []  # the new centroids of the clusters for this iteration

            # computing the new centroids of each of the clusters
            for ind in range(0, len(clusters)):
                new_centroid = [sum(x) for x in zip(*clusters[ind][1])]
                cluster_len = len(clusters[ind][1])
                new_centroid = [x / cluster_len for x in new_centroid]
                new_centroid = np.around(new_centroid, decimals=2)
                new_centroids.append(new_centroid)

            if np.array_equal(old_centroids, new_centroids):
                # stop if the newly computed centroids as the same as the old centroids of the clusters
                break
            else:
                # otherwise, assign the newly computed centroids as the current centroids of the clusters
                old_centroids[:] = new_centroids[:]
                for ind in range(0, len(clusters)):
                    clusters[ind][0] = new_centroids[ind]
                    clusters[ind][1] = []

        if k == 7:
            # plot the clusters when k = 7
            plot_clusters(clusters)

        sse = 0  # initializing the value of the sum of squared errors

        # compute the sum of squared errors of each of the clusters in k and summing them up
        # SSE = summation of (data point - centroid) ^ 2 for every data point in the cluster
        # repeating the above formula in every cluster and summing all the SSEs gives the final SSE
        for num in range(0, len(clusters)):
            centroid = clusters[num][0]
            cluster_data = clusters[num][1]
            sum_sse = 0
            for index in range(0, len(cluster_data)):
                sum_sse += math.pow(cluster_data[index][0] - centroid[0], 2) + \
                           math.pow(cluster_data[index][1] - centroid[1], 2) + \
                           math.pow(cluster_data[index][2] - centroid[2], 2)
            sse += sum_sse

        # appending the value of the SSE to the list of SSEs, corresponding to the current k
        clusters[cluster][2] = sse
        cluster_sse.append(sse)

        # incrementing the value of k, i.e., the number of clusters
        k += 1

    # plotting the graph of number of clusters vs. their SSEs
    plot(num_of_clusters, cluster_sse)
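The nested distance loops above could equivalently use NumPy broadcasting; a sketch under the assumption that data is an (n, 3) array and centroids a (k, 3) array, not the structure required by the assignment.

import numpy as np

def assign_clusters(data, centroids):
    # pairwise Euclidean distances; result has shape (n, k)
    dists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
    labels = dists.argmin(axis=1)                  # nearest centroid per point
    sse = np.sum((data - centroids[labels]) ** 2)  # total within-cluster SSE
    return labels, sse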
コード例 #46
0
    def _bottom_plot(self):
        """
        Create the bottom subplot to view the projected slice.

        Parameters
        ----------
        None

        Returns
        -------
        Bokeh figure
        """
        # List of the current positions of the sliders
        self.input_point_list = [point.value for point in self.slider_dict.values()]

        # Find the title of the x input and match it with the data
        x_idx = self.x_input_select.value
        x_data = self.predict_inputs[x_idx]
        # Find the position of the y_input slider
        y_value = self.y_input_slider.value

        # Rounds the y_data to match the predict_inputs value
        subplot_value_index = np.where(
            np.around(self.predict_inputs[self.y_input_select.value], 5) ==
            np.around(y_value, 5))[0]

        # Make slice in Z data at the point calculated before and add it to the data source
        z_data = self.Z[subplot_value_index, :].flatten()

        x = self.slider_source.data[x_idx]
        y = z_data

        # Update the data source with new data
        self.bottom_plot_source.data = dict(x=x, y=y)

        # Create and format figure
        self.bottom_plot_fig = bottom_plot_fig = figure(
            plot_width=550, plot_height=250,
            title="{} vs {}".format(x_idx, self.output_select.value), tools="")
        bottom_plot_fig.xaxis.axis_label = x_idx
        bottom_plot_fig.yaxis.axis_label = self.output_select.value
        bottom_plot_fig.line(x='x', y='y', source=self.bottom_plot_source)
        bottom_plot_fig.x_range.range_padding = 0.02
        bottom_plot_fig.y_range.range_padding = 0.1

        # Determine distance and alpha opacity of training points
        if self.is_structured_meta_model:
            data = self._structured_training_points(compute_distance=True)
        else:
            data = self._unstructured_training_points(compute_distance=True)

        self.bottom_alphas = 1.0 - data[:, 2] / self.dist_range

        # Training data scatter plot
        scatter_renderer = bottom_plot_fig.scatter(x=data[:, 0], y=data[:, 3], line_color=None,
                                                   fill_color='#000000',
                                                   fill_alpha=self.bottom_alphas.tolist())

        bottom_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
            (x_idx + " (train)", '@x'),
            (self.output_select.value + " (train)", '@y'),
        ]))

        span_width = self.dist_range * (max(x_data) - min(x_data))

        # Set the right_plot data source to new values
        self.bottom_plot_scatter_source.data = dict(
            bot_slice_x=x_data, bot_slice_y=np.repeat(y_value, self.resolution),
            upper_dashed=[i + span_width for i in np.repeat(y_value, self.resolution)],
            lower_dashed=[i - span_width for i in np.repeat(y_value, self.resolution)])

        self.contour_plot.line(
            'bot_slice_x', 'bot_slice_y', source=self.bottom_plot_scatter_source, color='black',
            line_width=2)
        self.contour_plot.line(
            'bot_slice_x', 'upper_dashed', line_dash='dashed',
            source=self.bottom_plot_scatter_source, color='black', line_width=2)
        self.contour_plot.line(
            'bot_slice_x', 'lower_dashed', line_dash='dashed',
            source=self.bottom_plot_scatter_source, color='black', line_width=2)

        return self.bottom_plot_fig
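The slice lookup above compares a slider value to the stored grid only after rounding both sides with np.around; a standalone sketch of that pattern (the names here are illustrative, not part of the plotting class).

import numpy as np

grid = np.linspace(0.0, 1.0, 11)        # e.g. the y-input values
slider_value = 0.30000000000000004      # typical float drift from a widget

# exact equality can miss; rounding both sides to 5 decimals recovers the index
idx = np.where(np.around(grid, 5) == np.around(slider_value, 5))[0]
# idx == array([3])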
コード例 #47
0
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    gray = cv2.GaussianBlur(gray, (33, 33), 1)

    #get circles by Hough-Gradient method
    circles = cv2.HoughCircles(gray,
                               cv2.HOUGH_GRADIENT,
                               1,
                               80,
                               param1=10,
                               param2=70,
                               minRadius=1,
                               maxRadius=80)

    #draw Pien if there are circles
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            #i[0]=x-axis i[1]=y-axis i[2]=radius
            x = i[0]
            y = i[1]
            r = i[2]
            #cv2.circle(img, center, radius, color, thickness)
            #cv2.ellipse(img, center, axes, angle, startAngle, endAngle, color, thickness)

            #face
            cv2.circle(frame, (x, y), r, (0, 215, 255), -1)
            #right eyebrow
            cv2.ellipse(frame, (x + int(r * 2 / 3), y - int(r * 2 / 3)),
                        (int(r / 3), int(r / 4)), 0, 90, 165, (19, 69, 139), 3)
            #left eyebrow
            cv2.ellipse(frame, (x - int(r * 2 / 3), y - int(r * 2 / 3)),
コード例 #48
0
    def _probabilities_to_percentiles(
            self, forecast_probabilities, percentiles, bounds_pairing):
        """
        Conversion of probabilities to percentiles through the construction
        of a cumulative distribution function. This is effectively
        constructed by linear interpolation from the probabilities associated
        with each threshold to a set of percentiles.

        Args:
            forecast_probabilities (Iris cube):
                Cube with a threshold coordinate.
            percentiles (Numpy array):
                Array of percentiles, at which the corresponding values will be
                calculated.
            bounds_pairing (Tuple):
                Lower and upper bound to be used as the ends of the
                cumulative distribution function.

        Returns:
            percentile_cube (Iris cube):
                Cube containing values for the required diagnostic e.g.
                air_temperature at the required percentiles.

        """
        threshold_coord = forecast_probabilities.coord("threshold")
        threshold_unit = forecast_probabilities.coord("threshold").units
        threshold_points = threshold_coord.points

        # Ensure that the percentile dimension is first, so that the
        # conversion to a 2d array produces data in the desired order.
        forecast_probabilities = (
            enforce_coordinate_ordering(
                forecast_probabilities, threshold_coord.name()))
        prob_slices = convert_cube_data_to_2d(
            forecast_probabilities, coord=threshold_coord.name())

        # The requirement below for a monotonically changing probability
        # across thresholds can be thwarted by precision errors of order 1E-10,
        # as such, here we round to a precision of 9 decimal places.
        prob_slices = np.around(prob_slices, 9)

        # Invert probabilities for data thresholded above thresholds.
        relation = forecast_probabilities.attributes['relative_to_threshold']
        if relation == 'above':
            probabilities_for_cdf = 1 - prob_slices
        elif relation == 'below':
            probabilities_for_cdf = prob_slices
        else:
            msg = ("Probabilities to percentiles only implemented for "
                   "thresholds above or below a given value."
                   "The relation to threshold is given as {}".format(relation))
            raise NotImplementedError(msg)

        threshold_points, probabilities_for_cdf = (
            self._add_bounds_to_thresholds_and_probabilities(
                threshold_points, probabilities_for_cdf, bounds_pairing))

        if np.any(np.diff(probabilities_for_cdf) < 0):
            msg = ("The probability values used to construct the "
                   "Cumulative Distribution Function (CDF) "
                   "must be ascending i.e. in order to yield "
                   "a monotonically increasing CDF."
                   "The probabilities are {}".format(probabilities_for_cdf))
            warnings.warn(msg)

        # Convert percentiles into fractions.
        percentiles = [x/100.0 for x in percentiles]

        forecast_at_percentiles = (
            np.empty((len(percentiles), probabilities_for_cdf.shape[0])))
        for index in range(probabilities_for_cdf.shape[0]):
            forecast_at_percentiles[:, index] = np.interp(
                percentiles, probabilities_for_cdf[index, :],
                threshold_points)

        # Convert percentiles back into percentages.
        percentiles = [x*100.0 for x in percentiles]

        # Reshape forecast_at_percentiles, so the percentiles dimension is
        # first, and any other dimension coordinates follow.
        forecast_at_percentiles = (
            restore_non_probabilistic_dimensions(
                forecast_at_percentiles, forecast_probabilities,
                threshold_coord.name(), len(percentiles)))

        for template_cube in forecast_probabilities.slices_over(
                threshold_coord.name()):
            template_cube.rename(
                template_cube.name().replace("probability_of_", ""))
            template_cube.remove_coord(threshold_coord.name())
            template_cube.attributes.pop('relative_to_threshold')
            break
        percentile_cube = create_cube_with_percentiles(
            percentiles, template_cube, forecast_at_percentiles,
            custom_name='percentile', cube_unit=threshold_unit)
        return percentile_cube
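Stripped of the cube handling, the core of the conversion above is a linear interpolation of the requested percentiles through a CDF defined at the threshold points; a minimal sketch with made-up numbers.

import numpy as np

thresholds = np.array([270.0, 275.0, 280.0])        # e.g. air temperature thresholds
prob_below = np.array([0.1, 0.5, 0.9])              # P(value <= threshold), ascending
percentiles = np.array([25.0, 50.0, 75.0]) / 100.0  # percentiles as fractions

# interpolate threshold values at the requested CDF positions
values = np.interp(percentiles, prob_below, thresholds)
# values -> 271.875, 275.0, 278.125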
コード例 #49
0
def get_res(x_train, y_train, x_test, y_test):

    knn = KNeighborsClassifier()
    knn.fit(x_train, y_train)

    lg = LogisticRegression(penalty='l2')
    lg.fit(x_train, y_train)

    dtc = DecisionTreeClassifier()
    dtc.fit(x_train, y_train)

    gb = GradientBoostingClassifier(n_estimators=200)
    gb.fit(x_train, y_train)

    ab = AdaBoostClassifier()
    ab.fit(x_train, y_train)

    gnb = GaussianNB()
    gnb.fit(x_train, y_train)

    svm = SVC()
    svm.fit(x_train, y_train)

    mnb = MultinomialNB(alpha=0.01)
    mnb.fit(x_train, y_train)

    bnb = BernoulliNB(alpha=1.0,
                      binarize=0.31,
                      fit_prior=True,
                      class_prior=None)
    bnb.fit(x_train, y_train)

    rtc = RandomForestClassifier(n_estimators=10,
                                 max_depth=20,
                                 random_state=47)
    rtc.fit(x_train, y_train)

    num_list = [
        knn.score(x_test, y_test),
        lg.score(x_test, y_test),
        dtc.score(x_test, y_test),
        gb.score(x_test, y_test),
        ab.score(x_test, y_test),
        gnb.score(x_test, y_test),
        svm.score(x_test, y_test),
        mnb.score(x_test, y_test),
        bnb.score(x_test, y_test),
        rtc.score(x_test, y_test)
    ]
    name_list = [
        'KNN', 'Logistic', 'DecisionTree', 'GradientBoosting', 'AdaBoost',
        'GaussianNB', 'SVC', 'MultinomialNB', 'BernoulliNB', 'RandomForest'
    ]
    plt.title('title')
    num_list = np.around(num_list, decimals=3)
    autolabel(
        plt.bar(range(len(num_list)),
                num_list,
                color='rb',
                tick_label=name_list,
                width=0.4))
    plt.show()
コード例 #50
0
ファイル: image.py プロジェクト: EnSlavingBlair/Coincidences
    def _scale_internal(self, type=None, option='old', bscale=None, bzero=None,
                        blank=0):
        """
        This is an internal implementation of the `scale` method, which
        also supports handling BLANK properly.

        TODO: This is only needed for fixing #3865 without introducing any
        public API changes.  We should support BLANK better when rescaling
        data, and when that is added the need for this internal interface
        should go away.

        Note: the default of ``blank=0`` merely reflects the current behavior,
        and is not necessarily a deliberate choice (better would be to disallow
        conversion of floats to ints without specifying a BLANK if there are
        NaN/inf values).
        """

        if self.data is None:
            return

        # Determine the destination (numpy) data type
        if type is None:
            type = BITPIX2DTYPE[self._bitpix]
        _type = getattr(np, type)

        # Determine how to scale the data
        # bscale and bzero takes priority
        if bscale is not None and bzero is not None:
            _scale = bscale
            _zero = bzero
        elif bscale is not None:
            _scale = bscale
            _zero = 0
        elif bzero is not None:
            _scale = 1
            _zero = bzero
        elif (option == 'old' and self._orig_bscale is not None and
                self._orig_bzero is not None):
            _scale = self._orig_bscale
            _zero = self._orig_bzero
        elif option == 'minmax' and not issubclass(_type, np.floating):
            min = np.minimum.reduce(self.data.flat)
            max = np.maximum.reduce(self.data.flat)

            if _type == np.uint8:  # uint8 case
                _zero = min
                _scale = (max - min) / (2.0 ** 8 - 1)
            else:
                _zero = (max + min) / 2.0

                # throw away -2^N
                nbytes = 8 * _type().itemsize
                _scale = (max - min) / (2.0 ** nbytes - 2)
        else:
            _scale = 1
            _zero = 0

        # Do the scaling
        if _zero != 0:
            # 0.9.6.3 to avoid out of range error for BZERO = +32768
            # We have to explicitly cast _zero to prevent numpy from raising an
            # error when doing self.data -= zero, and we do this instead of
            # self.data = self.data - zero to avoid doubling memory usage.
            np.add(self.data, -_zero, out=self.data, casting='unsafe')
            self._header['BZERO'] = _zero
        else:
            try:
                del self._header['BZERO']
            except KeyError:
                pass

        if _scale and _scale != 1:
            self.data = self.data / _scale
            self._header['BSCALE'] = _scale
        else:
            try:
                del self._header['BSCALE']
            except KeyError:
                pass

        # Set blanks
        if blank is not None and issubclass(_type, np.integer):
            # TODO: Perhaps check that the requested BLANK value fits in the
            # integer type being scaled to?
            self.data[np.isnan(self.data)] = blank
            self._header['BLANK'] = blank

        if self.data.dtype.type != _type:
            self.data = np.array(np.around(self.data), dtype=_type)

        # Update the BITPIX Card to match the data
        self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
        self._bzero = self._header.get('BZERO', 0)
        self._bscale = self._header.get('BSCALE', 1)
        self._blank = blank
        self._header['BITPIX'] = self._bitpix

        # Since the image has been manually scaled, the current
        # bitpix/bzero/bscale now serve as the 'original' scaling of the image,
        # as though the original image has been completely replaced
        self._orig_bitpix = self._bitpix
        self._orig_bzero = self._bzero
        self._orig_bscale = self._bscale
        self._orig_blank = self._blank
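A worked example of the 'minmax' branch above for a 16-bit integer target, mirroring the subtract-BZERO / divide-by-BSCALE / round sequence of the method; the data values are made up.

import numpy as np

data = np.array([-1.0, 0.5, 3.0])
nbits = 16
zero = (data.max() + data.min()) / 2.0                 # BZERO = 1.0
scale = (data.max() - data.min()) / (2.0**nbits - 2)   # BSCALE ~ 6.1e-05

stored = np.around((data - zero) / scale).astype(np.int16)  # spans ~[-32767, 32767]
restored = stored * scale + zero                       # approximately the original data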
コード例 #51
0
def _calc_seam(baseline, polygon, angle, im_feats, bias=150):
    """
    Calculates seam between baseline and ROI boundary on one side.

    Adds a baseline-distance-weighted bias to the feature map, masks
    out the bounding polygon and rotates the line so it is roughly
    level.
    """
    MASK_VAL = 99999
    r, c = draw.polygon(polygon[:, 1], polygon[:, 0])
    c_min, c_max = int(polygon[:, 0].min()), int(polygon[:, 0].max())
    r_min, r_max = int(polygon[:, 1].min()), int(polygon[:, 1].max())
    patch = im_feats[r_min:r_max + 2, c_min:c_max + 2].copy()
    # bias feature matrix by distance from baseline
    mask = np.ones_like(patch)
    for line_seg in zip(baseline[:-1] - (c_min, r_min),
                        baseline[1:] - (c_min, r_min)):
        line_locs = draw.line(line_seg[0][1], line_seg[0][0], line_seg[1][1],
                              line_seg[1][0])
        mask[line_locs] = 0
    dist_bias = distance_transform_cdt(mask)
    # absolute mask
    mask = np.ones_like(patch, dtype=bool)
    mask[r - r_min, c - c_min] = False
    # dilate mask to compensate for aliasing during rotation
    mask = binary_erosion(mask, iterations=2)
    # combine weights with features
    patch[mask] = MASK_VAL
    patch += (dist_bias * (np.mean(patch[patch != MASK_VAL]) / bias))
    extrema = baseline[(0, -1), :] - (c_min, r_min)
    # scale line image to max 600 pixel width
    scale = min(1.0, 600 / (c_max - c_min))
    tform, rotated_patch = _rotate(patch,
                                   angle,
                                   center=extrema[0],
                                   scale=scale,
                                   cval=MASK_VAL)
    # ensure to cut off padding after rotation
    x_offsets = np.sort(np.around(tform.inverse(extrema)[:, 0]).astype('int'))
    rotated_patch = rotated_patch[:, x_offsets[0]:x_offsets[1] + 1]
    # infinity pad for seamcarve
    rotated_patch = np.pad(rotated_patch, ((1, 1), (0, 0)),
                           mode='constant',
                           constant_values=np.inf)
    r, c = rotated_patch.shape
    # fold into shape (c, r-2, 3)
    A = np.lib.stride_tricks.as_strided(
        rotated_patch, (c, r - 2, 3),
        (rotated_patch.strides[1], rotated_patch.strides[0],
         rotated_patch.strides[0]))
    B = rotated_patch[1:-1, 1:].swapaxes(0, 1)
    backtrack = np.zeros_like(B, dtype='int')
    T = np.empty((B.shape[1]), 'f')
    R = np.arange(-1, len(T) - 1)
    for i in np.arange(c - 1):
        A[i].min(1, T)
        backtrack[i] = A[i].argmin(1) + R
        B[i] += T
    # backtrack
    seam = []
    j = np.argmin(rotated_patch[1:-1, -1])
    for i in range(c - 2, -2, -1):
        seam.append((i + x_offsets[0] + 1, j))
        j = backtrack[i, j]
    seam = np.array(seam)[::-1]
    seam_mean = seam[:, 1].mean()
    seam_std = seam[:, 1].std()
    seam[:, 1] = np.clip(seam[:, 1], seam_mean - seam_std,
                         seam_mean + seam_std)
    # rotate back
    seam = tform(seam).astype('int')
    # filter out seam points in masked area of original patch/in padding
    seam = seam[seam.min(axis=1) >= 0, :]
    m = (seam < mask.shape[::-1]).T
    seam = seam[np.logical_and(m[0], m[1]), :]
    seam = seam[np.invert(mask[seam.T[1], seam.T[0]])]
    seam += (c_min, r_min)
    return seam
コード例 #52
0
        df_stats['host'], df_stats['pathogen'], df_stats['unassigned_reads'],
        df_stats['unmapped_reads'], df_stats['trimmed_reads']
    ],
                        axis=1)
    df_comb.columns = [
        'host', 'pathogen', 'unassigned reads', 'unmapped reads',
        'trimmed reads'
    ]
    # calculate max total no. of reads and define x label values, rounded to the nearest million (10^6)
    max_limit = round(df_stats['total_raw_reads'].max(), -6)
    if max_limit > 0:  # if total number of reads is higher than 10^6 specify label ticks adjusted to this magnitude
        # divide max_limit by 20 (number of label ticks) to get step
        step2 = int(max_limit / 20)
        # define label ticks
        array_labels = np.arange(0, max_limit + step2, step=step2)
        array_labels2 = np.around(array_labels)
        # set m value to specify format of scientific format of x label ticks
        m = 6
    else:  # define label ticks if number of reads is smaller than 10^6
        step = int(df_stats['total_raw_reads'].max() / 20)
        array_labels = np.arange(0,
                                 df_stats['total_raw_reads'].max() + step,
                                 step=step)
        array_labels2 = np.around(array_labels)
        # set m value to specify format of scientific format of x label ticks
        m = 0
else:
    df_comb = pd.concat([
        df_stats['host'], df_stats['pathogen'], df_stats['unassigned_reads'],
        df_stats['unmapped_reads']
    ],
コード例 #53
0
    def test_DiagnosticSummaryStats(self):
        structured_datapath = auto_load_processed(
            self.structured_cycler_file_path_trunc)

        f = DiagnosticSummaryStats(structured_datapath)
        self.assertTrue(f.validate()[0])

        f.create_features()

        self.assertEqual(f.features.shape[1], 54)
        self.assertListEqual(
            [f.features.columns[0], f.features.columns[41]],
            ["var_charging_capacity", "square_discharging_dQdV"],
        )
        self.assertListEqual(
            [f.features.columns[42], f.features.columns[53]],
            [
                "diag_sum_diff_0_1_rpt_0.2Cdischarge_capacity",
                "diag_sum_diff_0_1_rpt_2Ccharge_energy"
            ],
        )
        x = [
            -3.622991274215596, -1.4948801528128568, -2.441732890889216,
            -0.794422489658189, 0.4889470327970021, 0.7562360890191123,
            -0.9122534588595697, -3.771727344982484, -1.6613278517299095,
            -3.9279757071656616, 0.1418911233780052, 0.7493913209640308,
            0.6755655006191633, -1.0823827139302122, -2.484906394983077,
            -0.8949449222504844, -1.7523322777749897, -1.4575307327423712,
            0.4467463228405364, 1.3265006178265961, 0.2422557417274141,
            -2.6373799375134594, -1.230847957965504, -2.046540216421213,
            0.2334339752067063, 0.8239822694093881, 1.2085578295115413,
            0.06687710057927358, -1.0135736732168983, 0.12101479889802537,
            -2.2735196264247866, 0.37844357940755063, 1.425189114118929,
            1.8786507359201035, 1.6731897281287798, -1.1875358619917917,
            0.1361208058450041, -1.8275104616090456, -0.2665523054105704,
            1.1375831683815445, 1.84972885518774, 1.5023615714170622,
            -0.00472514151532623, -0.003475275535937185, -0.008076419207993832,
            -0.008621551983451683, 7.413107429038043e-05,
            0.0013748657878274915, -0.005084993748595586,
            -0.005675990891556979, -0.002536196993382343,
            -0.0018987653783979423, -0.00016598153694586686,
            -0.00105148083990717
        ]
        computed = f.features.iloc[0].tolist()
        for indx, value in enumerate(x):
            precision = 5
            self.assertEqual(np.around(np.float32(value), precision),
                             np.around(np.float32(computed[indx]), precision))

        self.assertEqual(
            np.around(f.features['var_discharging_capacity'].iloc[0], 6),
            np.around(-3.771727344982484, 6))

        structured_datapath_loc2 = os.path.join(
            TEST_FILE_DIR,
            "PredictionDiagnostics_000136_00002D_truncated_structure.json")
        structured_datapath2 = auto_load_processed(structured_datapath_loc2)

        f2 = DiagnosticSummaryStats(structured_datapath2)
        self.assertTrue(f2.validate()[0])

        f2.create_features()

        x = [
            -2.4602845133649374, -0.7912059829821004, -1.3246516129064152,
            -0.5577484175221676, 0.22558675296269257, 1.4107424811304434,
            0.44307560772987753, -2.968731527885897, -1.003386799815887,
            -1.2861922579124305, 0.010393880890967514, 0.4995216948726259,
            1.4292366107477192, 0.2643953383205679, -1.3377336978836682,
            -0.21470956778563194, -0.7617667690573674, -0.47886877345098366,
            0.23547492071796852, 1.9699615602673914, 1.566893893282218,
            -1.8282011110054657, -0.46311299104523346, -0.7166620260036703,
            0.06268262404068164, 0.5400910355865228, 2.00139593781454,
            1.4038773986895716, 0.46799197793006897, 0.5117431282997131,
            -1.4615182876586914, 1.2889420237956628, 2.6205135712205725,
            2.176016330718994, 3.1539101600646973, -0.9218153953552246,
            0.23360896110534668, -1.1706260442733765, -0.5070897459236073,
            1.1722059184617377, 2.0029776096343994, 1.7837194204330444,
            -0.021425815851990795, -0.020270314430328763,
            -0.028696091773302315, -0.02782930233422708, -0.017478835661355316,
            -0.019788159842565697, -0.021354840746757066,
            -0.021056601447539146, -0.026599426370616085, -0.03017946374275189,
            -0.017983518726387225, -0.01771638489069907
        ]
        computed = f2.features.iloc[0].tolist()
        for indx, value in enumerate(x):
            precision = 5
            self.assertEqual(np.around(np.float32(value), precision),
                             np.around(np.float32(computed[indx]), precision))
コード例 #54
0
def extract_polygons(im: Image.Image, bounds: Dict[str, Any]) -> Image.Image:
    """
    Yields the subimages of image im defined in the list of bounding polygons
    with baselines preserving order.

    Args:
        im: Input image
        bounds: A list of dicts in baseline:
            ```
            {'type': 'baselines',
             'lines': [{'baseline': [[x_0, y_0], ... [x_n, y_n]],
                        'boundary': [[x_0, y_0], ... [x_n, y_n]]},
                       ....]
            }
            ```
            or bounding box format:
            ```
            {'boxes': [[x_0, y_0, x_1, y_1], ...],
             'text_direction': 'horizontal-lr'}
            ```

    Yields:
        The extracted subimage
    """
    if 'type' in bounds and bounds['type'] == 'baselines':
        # select proper interpolation scheme depending on shape
        if im.mode == '1':
            order = 0
            im = im.convert('L')
        else:
            order = 1
        im = np.array(im)

        for line in bounds['lines']:
            if line['boundary'] is None:
                raise KrakenInputException('No boundary given for line')
            pl = np.array(line['boundary'])
            baseline = np.array(line['baseline'])
            c_min, c_max = int(pl[:, 0].min()), int(pl[:, 0].max())
            r_min, r_max = int(pl[:, 1].min()), int(pl[:, 1].max())

            if (pl < 0).any() or (pl.max(axis=0)[::-1] >= im.shape[:2]).any():
                raise KrakenInputException(
                    'Line polygon outside of image bounds')
            if (baseline < 0).any() or (baseline.max(axis=0)[::-1] >=
                                        im.shape[:2]).any():
                raise KrakenInputException('Baseline outside of image bounds')

            # fast path for straight baselines requiring only rotation
            if len(baseline) == 2:
                baseline = baseline.astype(float)
                # calculate direction vector
                lengths = np.linalg.norm(np.diff(baseline.T), axis=0)
                p_dir = np.mean(np.diff(baseline.T) * lengths / lengths.sum(),
                                axis=1)
                p_dir = (p_dir.T / np.sqrt(np.sum(p_dir**2, axis=-1)))
                angle = np.arctan2(p_dir[1], p_dir[0])
                patch = im[r_min:r_max + 1, c_min:c_max + 1].copy()
                offset_polygon = pl - (c_min, r_min)
                r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0])
                mask = np.zeros(patch.shape[:2], dtype=bool)
                mask[r, c] = True
                patch[mask != True] = 0
                extrema = offset_polygon[(0, -1), :]
                # scale line image to max 600 pixel width
                tform, rotated_patch = _rotate(patch,
                                               angle,
                                               center=extrema[0],
                                               scale=1.0,
                                               cval=0)
                i = Image.fromarray(rotated_patch.astype('uint8'))
            # normal slow path with piecewise affine transformation
            else:
                if len(pl) > 50:
                    pl = approximate_polygon(pl, 2)
                full_polygon = subdivide_polygon(pl, preserve_ends=True)
                pl = geom.MultiPoint(full_polygon)

                bl = zip(baseline[:-1:], baseline[1::])
                bl = [geom.LineString(x) for x in bl]
                cum_lens = np.cumsum([0] + [line.length for line in bl])
                # distance of intercept from start point and number of line segment
                control_pts = []
                for point in pl.geoms:
                    npoint = np.array(point.coords)[0]
                    line_idx, dist, intercept = min(
                        ((idx, line.project(point),
                          np.array(
                              line.interpolate(line.project(point)).coords))
                         for idx, line in enumerate(bl)),
                        key=lambda x: np.linalg.norm(npoint - x[2]))
                    # absolute distance from start of line
                    line_dist = cum_lens[line_idx] + dist
                    intercept = np.array(intercept)
                    # side of line the point is at
                    side = np.linalg.det(
                        np.array([[
                            baseline[line_idx + 1][0] - baseline[line_idx][0],
                            npoint[0] - baseline[line_idx][0]
                        ],
                                  [
                                      baseline[line_idx + 1][1] -
                                      baseline[line_idx][1],
                                      npoint[1] - baseline[line_idx][1]
                                  ]]))
                    side = np.sign(side)
                    # signed perpendicular distance from the rectified distance
                    per_dist = side * np.linalg.norm(npoint - intercept)
                    control_pts.append((line_dist, per_dist))
                # calculate baseline destination points
                bl_dst_pts = baseline[0] + np.dstack(
                    (cum_lens, np.zeros_like(cum_lens)))[0]
                # calculate bounding polygon destination points
                pol_dst_pts = np.array([
                    baseline[0] + (line_dist, per_dist)
                    for line_dist, per_dist in control_pts
                ])
                # extract bounding box patch
                c_dst_min, c_dst_max = int(pol_dst_pts[:, 0].min()), int(
                    pol_dst_pts[:, 0].max())
                r_dst_min, r_dst_max = int(pol_dst_pts[:, 1].min()), int(
                    pol_dst_pts[:, 1].max())
                output_shape = np.around(
                    (r_dst_max - r_dst_min + 1, c_dst_max - c_dst_min + 1))
                patch = im[r_min:r_max + 1, c_min:c_max + 1].copy()
                # offset src points by patch shape
                offset_polygon = full_polygon - (c_min, r_min)
                offset_baseline = baseline - (c_min, r_min)
                # offset dst point by dst polygon shape
                offset_bl_dst_pts = bl_dst_pts - (c_dst_min, r_dst_min)
                offset_pol_dst_pts = pol_dst_pts - (c_dst_min, r_dst_min)
                # mask out points outside bounding polygon
                mask = np.zeros(patch.shape[:2], dtype=bool)
                r, c = draw.polygon(offset_polygon[:, 1], offset_polygon[:, 0])
                mask[r, c] = True
                patch[mask != True] = 0
                # estimate piecewise transform
                src_points = np.concatenate((offset_baseline, offset_polygon))
                dst_points = np.concatenate(
                    (offset_bl_dst_pts, offset_pol_dst_pts))
                tform = PiecewiseAffineTransform()
                tform.estimate(src_points, dst_points)
                o = warp(patch,
                         tform.inverse,
                         output_shape=output_shape,
                         preserve_range=True,
                         order=order)
                i = Image.fromarray(o.astype('uint8'))
            yield i.crop(i.getbbox()), line
    else:
        if bounds['text_direction'].startswith('vertical'):
            angle = 90
        else:
            angle = 0
        for box in bounds['boxes']:
            if isinstance(box, tuple):
                box = list(box)
            if (box < [0, 0, 0, 0] or box[::2] >= [im.size[0], im.size[0]]
                    or box[1::2] >= [im.size[1], im.size[1]]):
                logger.error('bbox {} is outside of image bounds {}'.format(
                    box, im.size))
                raise KrakenInputException('Line outside of image bounds')
            yield im.crop(box).rotate(angle, expand=True), box
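A hypothetical call using the bounding-box format documented in the docstring above; the image path and box coordinates are placeholders.

from PIL import Image

im = Image.open('page.png')   # placeholder path
bounds = {'boxes': [[10, 20, 400, 60]], 'text_direction': 'horizontal-lr'}

for line_im, box in extract_polygons(im, bounds):
    line_im.save('line_{}_{}.png'.format(box[0], box[1]))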
コード例 #55
0
import os
import numpy as np


def subpbs(N, gsw, basestate, basename):
    fout = 'gsw' + str(np.round(gsw, 2))
    for j in range(1, N + 1):
        fname = basename + '/' + fout + '_' + str(j) + '.dmc.pbs'
        os.system('qsub ' + fname)


if __name__ == '__main__':
    for basestate in np.arange(10):
        for gsw in np.arange(1.0, 0.0, -0.1):
            if (gsw == 1.0): N = 1
            else: N = 10
            subpbs(N,
                   gsw,
                   basestate,
                   basename='gsw' + str(np.around(gsw, 2)) + 'b' +
                   str(basestate))
コード例 #56
0
    def test_get_fractional_quantity_remaining_nx(self):
        processed_cycler_run_path_1 = os.path.join(
            TEST_FILE_DIR, "PreDiag_000233_00021F_truncated_structure.json")
        structured_datapath = auto_load_processed(processed_cycler_run_path_1)
        structured_datapath.structured_summary = structured_datapath.structured_summary[
            ~structured_datapath.structured_summary.cycle_index.
            isin(structured_datapath.diagnostic_summary.cycle_index)]

        sum_diag = featurizer_helpers.get_fractional_quantity_remaining_nx(
            structured_datapath,
            metric="discharge_energy",
            diagnostic_cycle_type="hppc")
        # print(sum_diag["normalized_regular_throughput"])
        self.assertEqual(len(sum_diag.index), 16)
        self.assertEqual(sum_diag.cycle_index.max(), 1507)
        self.assertEqual(
            np.around(sum_diag["initial_regular_throughput"].iloc[0], 3),
            np.around(237.001769, 3))
        self.assertEqual(
            np.around(sum_diag["normalized_regular_throughput"].iloc[15], 3),
            np.around(45.145, 3))
        self.assertEqual(
            np.around(sum_diag["normalized_diagnostic_throughput"].iloc[15],
                      3), np.around(5.098, 3))
        self.assertFalse(sum_diag.isnull().values.any())
        self.assertEqual(sum_diag['diagnostic_start_cycle'].iloc[0], 30)
        self.assertEqual(sum_diag['diagnostic_interval'].iloc[0], 100)
        self.assertEqual(sum_diag['epoch_time'].iloc[0], 1576641695)

        sum_diag = featurizer_helpers.get_fractional_quantity_remaining_nx(
            structured_datapath,
            metric="discharge_energy",
            diagnostic_cycle_type="rpt_1C")
        self.assertEqual(len(sum_diag.index), 16)
        self.assertEqual(sum_diag.cycle_index.max(), 1509)
        self.assertEqual(
            np.around(sum_diag["initial_regular_throughput"].iloc[0], 3),
            np.around(237.001769, 3))
        self.assertEqual(
            np.around(sum_diag["normalized_regular_throughput"].iloc[15], 3),
            np.around(45.145, 3))
        self.assertEqual(
            np.around(sum_diag["normalized_diagnostic_throughput"].iloc[15],
                      3), np.around(5.229, 3))
        self.assertEqual(sum_diag['diagnostic_start_cycle'].iloc[0], 30)
        self.assertEqual(sum_diag['diagnostic_interval'].iloc[0], 100)
        self.assertEqual(sum_diag['epoch_time'].iloc[0], 1576736230)

        processed_cycler_run_path_2 = os.path.join(
            TEST_FILE_DIR,
            "Talos_001383_NCR18650618001_CH31_truncated_structure.json")
        structured_datapath = auto_load_processed(processed_cycler_run_path_2)

        sum_diag = featurizer_helpers.get_fractional_quantity_remaining_nx(
            structured_datapath,
            metric="discharge_energy",
            diagnostic_cycle_type="hppc")
        self.assertEqual(len(sum_diag.index), 3)
        self.assertEqual(sum_diag.cycle_index.max(), 242)
        self.assertEqual(
            np.around(sum_diag["initial_regular_throughput"].iloc[0], 3),
            np.around(331.428, 3))
        self.assertEqual(
            np.around(sum_diag["normalized_regular_throughput"].iloc[2], 3),
            np.around(6.817, 3))
        self.assertEqual(
            np.around(sum_diag["normalized_diagnostic_throughput"].iloc[2], 3),
            np.around(0.385, 3))
        self.assertEqual(sum_diag['diagnostic_start_cycle'].iloc[0], 30)
        self.assertEqual(sum_diag['diagnostic_interval'].iloc[0], 200)
        self.assertEqual(sum_diag['epoch_time'].iloc[0], 1598156928)
コード例 #57
0
        policy_loss_sum += params["SUPERVISED_WEIGHT"] * contrast_loss(
            latent_mu, latent_mu_d2, 1)

    policy_loss_sum += params["VARIANCE_WEIGHT"] * torch.norm(latent_variance)

    loss_copy = policy_loss_sum.detach().cpu().numpy().copy()
    policy_loss_sum.backward()

    optimizer.step()

    if i_episode % params["CHKP_FREQ"] == 0:
        torch.save(policy.state_dict(), os.path.join(exp_dir, 'reinforce.pkl'))

        img = np.zeros((params["HEIGHT"], params["WIDTH"] * 3, 3),
                       dtype=np.uint8)
        img[:, :params["WIDTH"], :] = np.around(t2n(state) * 255, 0)
        img[:, params["WIDTH"]:params["WIDTH"] * 2, :] = np.around(
            next_state * 255, 0)
        diff = (np.sum(t2n(state) - next_state, axis=2) + 3) / 6
        diff = np.dstack((diff, diff, diff))
        img[:, params["WIDTH"] * 2:, :] = np.around(diff * 255, 0)
        if LOGGING:
            wandb.log(
                {"img": wandb.Image(img, caption="{:04d}".format(i_episode))})

    if LOGGING:
        wandb.log({
            "rewards": np.mean(rewards_raw),
            "loss": float(loss_copy),
            "kld": KLD.item()
        })
コード例 #58
0
def plot_svm():
    """Creates a classifier for phase in room 1. Uses average meeting duration and number of meetings as features. 
        Uses support vector clustering (SVC) method."""

    pair_db = pd.read_csv('parsed_data/pair_times.csv')

    # The phase classifier will only be constructed for room 1
    room_1 = pair_db.loc[pair_db['room_id'] == 1, :]

    # Change the phase column from string to numeric type, so it can be used as input to scikit-learn machine learning functions
    num_phase = {
        'PHASE 1 dark': 0,
        'PHASE 1 light': 1,
        'PHASE 2 dark': 2,
        'PHASE 2 light': 3,
        'PHASE 3 dark': 4,
        'PHASE 3 light': 5
    }
    room_1['phase'] = room_1['phase'].replace(num_phase)

    # 3 phases will be classified, PHASE 1 dark, PHASE 1 light, PHASE 2 dark
    room_1 = room_1.loc[
        room_1['phase'] <= 2,
        ['average_meeting_duration', 'number_of_meetings', 'phase']]

    # Take the 'average_meeting_duration' and 'number_of_meetings' columns as a NumPy array.
    # Transform them by the natural logarithm; the log-scaled features are easier and faster to classify.
    X = np.log(room_1.to_numpy()[:, :2])

    # Take the phase information for training the classifier
    y = room_1.to_numpy()[:, 2]

    # We create an instance of SVC and fit our data.
    C = 1.0  # SVM regularization parameter
    svc = svm.SVC(kernel='linear', C=C).fit(X, y)

    # Get classification accuracy
    score = np.around(svc.score(X, y, sample_weight=None) * 100)

    # create a mesh to plot in
    h = 0.1  # step size in the mesh

    x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
    y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Define colors for plotting so they match with previous plots from plot_pair_results()
    color_dict = {0: 'r', 1: 'b', 2: 'magenta'}
    color_map = [color_dict[phase] for phase in y]

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
    plt.style.use('ggplot')

    fig, axes = plt.subplots()

    # Classify the points from the mesh marking the areas belonging to each phase
    Z = svc.predict(np.c_[xx.ravel(), yy.ravel()])

    # Reshape for plotting in 2D
    Z = Z.reshape(xx.shape)

    # Plot the classifier results
    # Adjust the colors so they are the same on all plots
    levels = [-1, 0, 1, 2]
    cs = axes.contourf(xx,
                       yy,
                       Z,
                       levels,
                       colors=('r', 'b', 'magenta'),
                       extend='both',
                       antialiased=True,
                       alpha=0.2,
                       linewidth=1)

    # Plot also the training points
    axes.scatter(X[:, 0], X[:, 1], c=color_map)

    # Set figure parameters
    axes.set_xlabel('average meeting duration')
    axes.set_ylabel('number of meetings')
    axes.set_xlim(xx.min(), xx.max())
    axes.set_ylim(yy.min(), yy.max())
    axes.set_title('SVC classification accuracy: %i %%' % (score))

    # make the legend
    artists, labels = cs.legend_elements()
    L = plt.legend(artists[1:-1],
                   labels[1:-1],
                   handleheight=2,
                   loc='lower left')
    L.get_texts()[0].set_text('PHASE 1 dark')
    L.get_texts()[1].set_text('PHASE 1 light')
    L.get_texts()[2].set_text('PHASE 2 dark')

    fig.savefig('figures/svc_phase.pdf')
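
The mesh / predict / contourf pattern above generalises to any 2-D classifier. A self-contained sketch on synthetic data (the toy clusters, class count and colours are made up; only the plotting pattern mirrors plot_svm):

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# Toy 2-D, 3-class data standing in for the (log duration, log count) features.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(loc=c, scale=0.4, size=(30, 2))
               for c in ((0, 0), (2, 0), (1, 2))])
y = np.repeat([0, 1, 2], 30)

clf = svm.SVC(kernel='linear', C=1.0).fit(X, y)

# Same mesh + predict + filled-contour pattern as plot_svm().
h = 0.1
xx, yy = np.meshgrid(np.arange(X[:, 0].min() - 0.5, X[:, 0].max() + 0.5, h),
                     np.arange(X[:, 1].min() - 0.5, X[:, 1].max() + 0.5, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

fig, ax = plt.subplots()
ax.contourf(xx, yy, Z, levels=[-1, 0, 1, 2], colors=('r', 'b', 'magenta'), alpha=0.2)
ax.scatter(X[:, 0], X[:, 1], c=np.array(['r', 'b', 'magenta'])[y], s=15)
ax.set_title('accuracy: %.0f %%' % (clf.score(X, y) * 100))
plt.show()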
Code example #59
0
File: image_manip.py Project: rizplate/d-script
import numpy as np


def sample_with_rotation(x, center, angle,
                         box_dim,
                         fill_value=255,
                         wraparound=True,
                         stdev_threshold=None,
                         test_stdev=False):
    """Get a patch from x, with rotation, without rotating the whole image
    (rotating the full image first turns out to be very slow).
    """
    rows, cols = box_dim
    img_rows, img_cols = x.shape

    if test_stdev:
        l, b, r, t = sample_bounds = int(center[0] - cols // 2), \
                                     int(center[1] + rows // 2), \
                                     int(center[0] + cols // 2), \
                                     int(center[1] - rows // 2)
        if t < 0:
            t = 0
        if l < 0:
            l = 0
        if r > img_cols:
            r = img_cols
        if b > img_rows:
            b = img_rows
        sample_stdev = np.std(x[t:b, l:r])
        if stdev_threshold is not None and sample_stdev < stdev_threshold:
            return None

    # subtract half box width and height from translation vector to center it on sampling point
    # epsilon to avoid naughty numpy rounding behavior
    epsilon = 10e-4
    translation_matrix = np.zeros((3, 3), dtype=np.float32)
    np.fill_diagonal(translation_matrix, 1)
    translation_matrix[0, 2] = -cols / 2. + epsilon
    translation_matrix[1, 2] = -rows / 2. + epsilon

    # rotation matrix for sampling box centered on 0,0
    if angle != 0:
        rotation_matrix = np.zeros((3, 3), dtype=np.float32)
        rotation_matrix[0, 0] = np.cos(angle)
        rotation_matrix[1, 1] = np.cos(angle)
        rotation_matrix[1, 0] = np.sin(angle)
        rotation_matrix[0, 1] = -np.sin(angle)
        rotation_matrix[2, 2] = 1

    # second translation, into image coordinates
    back_translation_matrix = np.zeros((3, 3), dtype=np.float32)
    np.fill_diagonal(back_translation_matrix, 1)
    back_translation_matrix[0, 2] = center[0] + epsilon
    back_translation_matrix[1, 2] = center[1] + epsilon

    xforms = []

    # translation
    xforms.append(translation_matrix)
    # rotation in homogeneous coords
    if angle != 0:
        xforms.append(rotation_matrix)
    # translation back
    xforms.append(back_translation_matrix)

    # sampling grid in image coordinates
    sample = (np.ones(shape=(box_dim)) * fill_value).astype(x.dtype)
    x_coords = np.tile(np.arange(cols, dtype=np.float32), rows).reshape(-1)
    y_coords = np.repeat(np.arange(rows, dtype=np.float32), cols).reshape(-1)
    xy_coords = np.dstack((x_coords, y_coords, np.ones_like(x_coords))).transpose((0, 2, 1))
    new_xy_coords = xy_coords[:, :, :]
    for xform in xforms:
        new_xy_coords = np.dot(xform, new_xy_coords).transpose(1, 0, 2)

    new_xy_coords = np.around(new_xy_coords).astype(np.int32)

    # sample according to new coordinates
    for i in range(new_xy_coords.shape[2]):
        orig_x, orig_y, _ = orig_xy = np.around(xy_coords[0, :, i]).astype(np.int32)
        sample_x, sample_y, _ = sample_xy = new_xy_coords[0, :, i]
        try:
            img_col = sample_x
            img_row = sample_y
            if wraparound:
                img_col = img_col % img_cols
                img_row = img_row % img_rows
            else:
                if img_col < 0 or img_row < 0:
                    raise IndexError
            sample[orig_y, orig_x] = x[img_row, img_col]
        except IndexError:
            # out-of-bounds, just leave blank
            pass
    return sample
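
A minimal usage sketch for sample_with_rotation on a synthetic grayscale image; the image, patch centre, box size and angle below are arbitrary illustration values, not something the original file specifies.

import numpy as np

# Synthetic 200x200 grayscale "page" with a horizontal gradient so the patch content is visible.
img = np.tile(np.linspace(0, 255, 200, dtype=np.uint8), (200, 1))

# 64x64 patch centred at column 120, row 90, rotated by 30 degrees, white fill outside the page.
patch = sample_with_rotation(img,
                             center=(120, 90),
                             angle=np.deg2rad(30),
                             box_dim=(64, 64),
                             fill_value=255,
                             wraparound=False)
print(patch.shape)  # (64, 64)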
Code example #60
0
import numpy as np
from tqdm import tqdm

# calculate_A, calculate_exact_result, games, stochastic_games and utils are assumed
# to come from the surrounding package; they are not shown in this excerpt.


def ShapleyRegression(game,
                      batch_size=512,
                      detect_convergence=True,
                      thresh=0.01,
                      n_samples=None,
                      paired_sampling=True,
                      return_all=False,
                      bar=True,
                      verbose=False):
    # Verify arguments.
    if isinstance(game, games.CooperativeGame):
        stochastic = False
    elif isinstance(game, stochastic_games.StochasticCooperativeGame):
        stochastic = True
    else:
        raise ValueError('game must be CooperativeGame or '
                         'StochasticCooperativeGame')

    # Possibly force convergence detection.
    if n_samples is None:
        n_samples = 1e20
        if not detect_convergence:
            detect_convergence = True
            if verbose:
                print('Turning convergence detection on')

    if detect_convergence:
        assert 0 < thresh < 1

    # Weighting kernel (probability of each subset size).
    num_players = game.players
    weights = np.arange(1, num_players)
    weights = 1 / (weights * (num_players - weights))
    weights = weights / np.sum(weights)

    # Calculate null and grand coalitions for constraints.
    if stochastic:
        null = game.null(batch_size=batch_size)
        grand = game.grand(batch_size=batch_size)
    else:
        null = game.null()
        grand = game.grand()

    # Calculate difference between grand and null coalitions.
    total = grand - null

    # Set up bar.
    n_loops = int(np.ceil(n_samples / batch_size))
    if bar:
        if detect_convergence:
            bar = tqdm(total=1)
        else:
            bar = tqdm(total=n_loops * batch_size)

    # Setup.
    A = calculate_A(num_players)
    n = 0
    b = 0
    b_sum_squares = 0

    # For tracking progress.
    if return_all:
        N_list = []
        std_list = []
        val_list = []

    # Begin sampling.
    for it in range(n_loops):
        # Sample subsets.
        S = np.zeros((batch_size, num_players), dtype=bool)
        num_included = np.random.choice(num_players - 1, size=batch_size,
                                        p=weights) + 1
        for row, num in zip(S, num_included):
            inds = np.random.choice(num_players, size=num, replace=False)
            row[inds] = 1

        # Sample exogenous (if applicable).
        if stochastic:
            U = game.sample(batch_size)

        # Update estimators.
        if paired_sampling:
            # Paired samples.
            if stochastic:
                game_eval = game(S, U) - null
                S_comp = np.logical_not(S)
                comp_eval = game(S_comp, U) - null
                b_sample = 0.5 * (
                    S.astype(float).T * game_eval[:, np.newaxis].T
                    + S_comp.astype(float).T * comp_eval[:, np.newaxis].T).T
            else:
                game_eval = game(S) - null
                S_comp = np.logical_not(S)
                comp_eval = game(S_comp) - null
                b_sample = 0.5 * (
                    S.astype(float).T * game_eval[:, np.newaxis].T
                    + S_comp.astype(float).T * comp_eval[:, np.newaxis].T).T
        else:
            # Single sample.
            if stochastic:
                b_sample = (S.astype(float).T
                            * (game(S, U) - null)[:, np.newaxis].T).T
            else:
                b_sample = (S.astype(float).T
                            * (game(S) - null)[:, np.newaxis].T).T

        # Welford's algorithm: running mean of b plus a running sum of deviation
        # outer products, used by calculate_exact_result for the std estimate.
        n += batch_size
        b_diff = b_sample - b
        b += np.sum(b_diff, axis=0) / n
        b_diff2 = b_sample - b
        b_sum_squares += np.sum(
            np.expand_dims(b_diff, 2) * np.expand_dims(b_diff2, 1),
            axis=0)

        # Calculate progress.
        values, std = calculate_exact_result(A, b, total, b_sum_squares, n)
        ratio = np.max(
            np.max(std, axis=0) / (values.max(axis=0) - values.min(axis=0)))

        # Print progress message.
        if verbose:
            if detect_convergence:
                print(f'StdDev Ratio = {ratio:.4f} (Converge at {thresh:.4f})')
            else:
                print(f'StdDev Ratio = {ratio:.4f}')

        # Check for convergence.
        if detect_convergence:
            if ratio < thresh:
                if verbose:
                    print('Detected convergence')

                # Skip bar ahead.
                if bar:
                    bar.n = bar.total
                    bar.refresh()
                break

        # Forecast number of iterations required.
        if detect_convergence:
            N_est = (it + 1) * (ratio / thresh) ** 2
            if bar and not np.isnan(N_est):
                bar.n = np.around((it + 1) / N_est, 4)
                bar.refresh()
        elif bar:
            bar.update(batch_size)

        # Save intermediate quantities.
        if return_all:
            val_list.append(values)
            std_list.append(std)
            if detect_convergence:
                N_list.append(N_est)

    # Return results.
    if return_all:
        # Dictionary for progress tracking.
        iters = (
            (np.arange(it + 1) + 1) * batch_size *
            (1 + int(paired_sampling)))
        tracking_dict = {
            'values': val_list,
            'std': std_list,
            'iters': iters}
        if detect_convergence:
            tracking_dict['N_est'] = N_list

        return utils.ShapleyValues(values, std), tracking_dict
    else:
        return utils.ShapleyValues(values, std)
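
Inside the sampling loop, the coalition size k is drawn with probability proportional to 1/(k(n-k)), the kernel weighting over subset sizes, before choosing which players enter the subset. A standalone sketch of just that step (player count and batch size here are arbitrary):

import numpy as np

num_players = 8
batch_size = 16

# Kernel weights over coalition sizes 1..n-1, exactly as in ShapleyRegression above.
sizes = np.arange(1, num_players)
weights = 1.0 / (sizes * (num_players - sizes))
weights /= weights.sum()

# One boolean subset indicator per row: pick a size k, then k distinct players.
S = np.zeros((batch_size, num_players), dtype=bool)
num_included = np.random.choice(num_players - 1, size=batch_size, p=weights) + 1
for row, k in zip(S, num_included):
    row[np.random.choice(num_players, size=k, replace=False)] = True

print(S.sum(axis=1))  # sampled coalition sizes, concentrated near 1 and n-1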