Example #1
0
  def _testDefaultBasic(self, dtype):
    """One-hot with default on/off values, checked for axis=-1 and axis=0."""
    indices = np.asarray([0, 2, -1, 1], dtype=dtype)
    depth = 3

    expected = np.asarray(
        [[1.0, 0.0, 0.0],
         [0.0, 0.0, 1.0],
         [0.0, 0.0, 0.0],
         [0.0, 1.0, 0.0]],
        dtype=dtype)

    # Default axis (-1): one-hot vectors lie along the last dimension.
    self._testBothOneHot(indices=indices, depth=depth, dtype=dtype,
                         truth=expected)

    # axis=0: the output is the transpose of the axis=-1 result.
    self._testBothOneHot(indices=indices, depth=depth, axis=0, dtype=dtype,
                         truth=expected.T)
Example #2
0
def time_column(table, ifo=None):
    """Extract the 'time' column from the given table.

    This function uses time_func to determine the correct column to
    use as a proxy for 'time' and returns that column.
    The following mappings are used:
    - `sngl_inspiral` -> 'end' time
    - `sngl_burst` -> 'peak' time
    - `sngl_ringdown` -> 'start' time

    @param table
        any `LIGO_LW` table
    @param ifo
        an interferometer prefix if you want single-detector times

    @returns a numpy array object with a 'time' element for each row in
    the table
    """
    if hasattr(table, "get_time"):
        return numpy.asarray(table.get_time())
    func = time_func(ligolw_table.StripTableName(table.tableName))
    if hasattr(table, func.__name__):
        return numpy.asarray(getattr(table, func.__name__)())
    # BUG FIX: the original called map(func_name, table) with the function's
    # *name* (a str), which raises TypeError.  Apply the callable itself, and
    # build a list so numpy sees a sequence on Python 3 (map is lazy there).
    return numpy.asarray([func(row) for row in table])
Example #3
0
def load_adm_sat_school_data(return_X_y=False):
    """Load the merged admissions/SAT school dataset from CSV.

    The first row of './merged_adm_sat_data.csv' is expected to hold
    n_samples, n_features and the target names; the remaining rows hold
    three feature columns and one target column.

    @param return_X_y: when True return (data, target) instead of a Bunch.
    @returns (data, target) or a sklearn-style Bunch with data, target,
        target_names, DESCR and feature_names.
    """
    with open("./merged_adm_sat_data.csv") as csv_file:
        data_file = csv.reader(csv_file)
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        target_names = np.array(temp[2:])

    df = pd.read_csv("./merged_adm_sat_data.csv", sep=",", usecols=(0, 1, 2, 3), skiprows=0)
    data = np.empty((n_samples, n_features), dtype=int)
    target = np.ma.empty((n_samples,), dtype=int)

    for index, row in df.iterrows():
        # BUG FIX: np.float / np.int were removed in NumPy 1.24; use the
        # builtins.  Also index the row yielded by iterrows() directly
        # instead of re-indexing the whole frame.
        # NOTE(review): float features are truncated into an int array --
        # confirm this is intended.
        data[index] = np.asarray([row.iloc[0], row.iloc[1], row.iloc[2]], dtype=float)
        target[index] = np.asarray(row.iloc[3], dtype=int)

    feature_names = np.array(['ACT_AVG','SAT_AVG','GRAD_DEBT','REGION'])

    if return_X_y:
        return data, target

    return datasets.base.Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR='School Data set',
                 feature_names=feature_names)
  def test_layer_div(self):
      # Element-wise layer division should yield 0.2 in every cell.
      quotient = self.layer6 / self.layer7
      as_array = np.asarray(quotient.get_nparray())
      expected = np.asarray([[0.2] * 3] * 3)
      self.assertEqual(np.all(as_array == 0.2), True)
      self.assertTrue(allequal(quotient._data, expected))
 def test_layer_sub(self):
     # Element-wise layer subtraction should yield -4 in every cell.
     difference = self.layer6 - self.layer7
     as_array = np.asarray(difference.get_nparray())
     expected = np.asarray([[-4] * 3] * 3)
     self.assertEqual(np.all(as_array == -4), True)
     self.assertTrue(allequal(difference._data, expected))
Example #6
0
  def save_nodes_to_store(self, store, queue):
    """Persist the selected features of every node into `store`.

    For each node in self.nodes a feature dict is assembled according to
    the feature names listed in self.nodes_features, merged with any
    features already stored under the same node id, and written back via
    store.put_node.

    NOTE(review): `queue` is not used in this method -- confirm whether it
    is required by the caller's interface.
    """
    for node_id, node in self.nodes.items():
      features = {}
      features['neighbors'] = node['neighbors']
      if 'soft_label' in self.nodes_features:
        features['soft_label'] = node['soft_label']
      if 'size' in self.nodes_features:
        # Size is the number of voxel positions recorded for this node.
        features['size'] = len(node['pos'])
      if 'pos' in self.nodes_features:
        features['pos'] = np.asarray(node['pos'])
        # Normalize an empty position list to an empty (0, 3) array so the
        # coordinate dimension is preserved for downstream consumers.
        if features['pos'].shape == (0,):
          features['pos'] = np.zeros(shape=(0,3))
      if 'mesh' in self.nodes_features:
        # Because `ml` includes the overlap region, a node may have a mesh
        # in the overlap yet not a single voxel in the non-overlap region.
        vertices, triangles = mesh.marche_cubes( node_id , self.ml )
        vertices += np.asarray(self.start).astype(np.uint16) * 2 #translate mesh
        features['mesh'] = mesh.get_adjacent( vertices, triangles )
      if 'semantic_sum' in self.nodes_features:
        features['semantic_sum'] = node['semantic_sum']

      features['tree'] = Tree(node_id)
      existent_node_features = store.get_node(node_id)
      if existent_node_features:
        # Merge with previously stored features rather than overwriting them.
        features = self.sum_nodes_features(existent_node_features, features )
      store.put_node(node_id, features)
Example #7
0
 def GetAllData(self):
     """Fetch frequency plus re/im traces for every measured parameter.

     Returns an OrderedDict mapping column name -> numpy array.  When a
     calibration is active, uncertainty traces are appended as well.
     """
     has_cal = self.GetCal()
     catalog = self.query('CALC:PAR:CAT?').strip('\n').strip("'").split(',')
     trace_names = catalog[1::2]   # display names, interleaved with ids
     trace_pars = catalog[::2]     # parameter ids used in data queries
     columns = ['Frequency (Hz)']
     traces = [self.GetFrequency()]
     for name in trace_names:
         columns.append('%sre ()' % name)
         columns.append('%sim ()' % name)
     if has_cal:
         for name in trace_names:
             columns.append('%sre unc ()' % name)
             columns.append('%sim unc ()' % name)
     # Trace data arrives as comma-separated floats, re/im interleaved.
     for par in trace_pars:
         raw = self.query("CALC:DATA:TRAC? '%s', SDAT" % par)
         values = np.asarray([float(v) for v in raw.split(',')])
         traces.append(values[::2])
         traces.append(values[1::2])
     if has_cal:
         for par in trace_pars:
             raw = self.query("CALC:DATA:TRAC? '%s', NCD" % par)
             values = np.asarray([float(v) for v in raw.split(',')])
             traces.append(values[::2])
             traces.append(values[1::2])
     return OrderedDict(zip(columns, traces))
def encode_doc(doc, max_len):
    """Encode up to *max_len* tokens of *doc* as vocabulary ids.

    Unknown tokens map to `default_id`; a None document encodes to an
    empty array.
    """
    if doc is None:
        return np.asarray([])

    tokens = itertools.islice(gensim.utils.tokenize(doc, to_lower=True), max_len)
    return np.asarray([vocab.token2id.get(tok, default_id) for tok in tokens])
Example #9
0
  def __init__(self, qdir='GM', verbosity=1, filepattern=None):
    """
      qdir      ... (opt) 'GM' or 'GK' for symmetry direction of q
      verbosity ... (opt) 0 silent, 1 minimal output, 3 debug, >3 debug interpolation
      filepattern.. (opt) read eps2D from files matching filepattern
                          qdir has no effect in this case
    """
    self.qdir      = qdir
    self.verbosity = verbosity

    # read DP-output files containing EPS2D (sorted by momentum transfer)
    if filepattern is None:
      self.path   = os.path.dirname(os.path.abspath(__file__))+'/data/'+qdir
      filepattern = self.path+'/CUTOFF_R12.6_grapheneAA-2d0-HIGH-RPA*-high-%s-q*_outlf.eps'%(self.qdir)
    self.spectra = dp_mdf.GetMDF(filepattern)
    self.spectra.sort(key=lambda mdf: np.linalg.norm(mdf.get_q('cc','au')))

    # check that we have eps2D
    assert len(self.spectra) > 0
    for mdf in self.spectra:
      assert mdf.param['quantity'] == 'mdf'
      # BUG FIX: str.find() returns -1 when the substring is absent (truthy)
      # and 0 when it is at the very start (falsy), so the previous
      # `assert comment.find('eps2D')` accepted missing markers and rejected
      # leading ones.  Test membership explicitly instead.
      assert 'eps2D' in ''.join(mdf.param['comment'])

    # extract data
    self.eps2D  = np.asarray([ mdf.eps for mdf in self.spectra ])
    q           = [ np.linalg.norm(mdf.get_q('cc','au')) for mdf in self.spectra ]
    self.q      = np.asarray(q, dtype=float)   # in 1/bohr
    self.E      = self.spectra[0].get_E()      # in eV
    self.calc_param = deepcopy(self.spectra[0].param)
    self.set_qprecision()
Example #10
0
    def set_data(self, x, y, A):
        """Set the image array and the x/y cell-boundary coordinates.

        A is masked of invalid values; x and y default to pixel-edge ranges
        derived from A's shape.  Raises ValueError when the coordinate sizes
        do not match A, or when A has an unsupported shape.
        """
        A = cbook.safe_masked_invalid(A)
        if x is None:
            x = np.arange(0, A.shape[1]+1, dtype=np.float64)
        else:
            x = np.asarray(x, np.float64).ravel()
        if y is None:
            y = np.arange(0, A.shape[0]+1, dtype=np.float64)
        else:
            y = np.asarray(y, np.float64).ravel()

        if A.shape[:2] != (y.size-1, x.size-1):
            # BUG FIX: the original used Python 2 `print` statements here,
            # which are a syntax error on Python 3; report the offending
            # sizes inside the exception message instead.
            raise ValueError(
                "Axes don't match array shape; A.shape=%r, y.size=%d, "
                "x.size=%d" % (A.shape, y.size, x.size))
        if A.ndim not in [2, 3]:
            raise ValueError("A must be 2D or 3D")
        if A.ndim == 3 and A.shape[2] == 1:
            # Drop a trailing singleton channel dimension.
            A.shape = A.shape[:2]
        self.is_grayscale = False
        if A.ndim == 3:
            if A.shape[2] in [3, 4]:
                # RGB(A) with three equal channels is effectively grayscale.
                if (A[:,:,0] == A[:,:,1]).all() and (A[:,:,0] == A[:,:,2]).all():
                    self.is_grayscale = True
            else:
                raise ValueError("3D arrays must have RGB or RGBA as last dim")
        self._A = A
        self._Ax = x
        self._Ay = y
        # Invalidate any cached RGBA rendering of the previous data.
        self._rgbacache = None
def test_sym_matrix_to_vec():
    """Known values and round-trip checks for sym_matrix_to_vec."""
    ones_mat = np.ones((3, 3))
    inv_sqrt2 = 1. / sqrt(2.)
    expected = np.array([inv_sqrt2, 1., inv_sqrt2, 1., 1., inv_sqrt2])
    assert_array_almost_equal(sym_matrix_to_vec(ones_mat), expected)

    expected = np.array([1., 1., 1.])
    assert_array_almost_equal(
        sym_matrix_to_vec(ones_mat, discard_diagonal=True), expected)

    # Check sym_matrix_to_vec is the inverse function of vec_to_sym_matrix.
    n = 5
    n_coefs = n * (n + 1) // 2
    rng = np.random.RandomState(0)

    # diagonal included in the vector
    vec = rng.rand(n_coefs)
    assert_array_almost_equal(sym_matrix_to_vec(vec_to_sym_matrix(vec)), vec)

    # diagonal supplied separately
    diagonal = rng.rand(n + 1)
    sym = vec_to_sym_matrix(vec, diagonal=diagonal)
    assert_array_almost_equal(
        sym_matrix_to_vec(sym, discard_diagonal=True), vec)

    # stacked matrices, diagonal included
    vecs = np.asarray([vec, 2. * vec, 0.5 * vec])
    assert_array_almost_equal(sym_matrix_to_vec(vec_to_sym_matrix(vecs)), vecs)

    # stacked matrices, diagonals supplied separately
    diagonals = np.asarray([diagonal, 3. * diagonal, -diagonal])
    syms = vec_to_sym_matrix(vecs, diagonal=diagonals)
    assert_array_almost_equal(
        sym_matrix_to_vec(syms, discard_diagonal=True), vecs)
Example #12
0
def segment_haar(cnarr):
    """Do segmentation for CNVkit.

    Calculate copy number segmentation by HaarSeg
    (http://haarseg.r-forge.r-project.org/)
    Input: log2 coverage data in Nexus 'basic' format
    Output: the CBS data table

    """
    per_chrom = []
    # Each chromosome is segmented independently.
    # ENH - skip large gaps (segment chrom. arms separately)
    for chrom, subprobes in cnarr.by_chromosome():
        seg = haarSeg(subprobes['log2'])
        starts = np.asarray(subprobes['start'])
        ends = np.asarray(subprobes['end'])
        per_chrom.append(pd.DataFrame({
            'chromosome': chrom,
            'start': starts.take(seg['start']),
            'end': ends.take(seg['start'] + seg['size'] - 1),
            'gene': '.',
            'log2': seg['log2'],
            'probes': seg['size'],
        }))
    result = pd.concat(per_chrom)
    echo("haar: Found", len(result), "segments")
    segarr = cnarr.as_dataframe(result)
    segarr.sort_columns()
    return segarr
  def _testTensorArrayWriteConcat(self, tf_dtype):
    """Write three variable-length slabs into a TensorArray and concat them."""
    np_dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)

      if tf_dtype == tf.string:
        # In Python3, np.str is unicode, while we always want bytes
        def convert(values):
          return np.asarray(values).astype("|S")
      else:
        def convert(values):
          return np.asarray(values).astype(np_dtype)

      writer = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
      writer = writer.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
      writer = writer.write(2, convert([[8.0, 9.0]]))

      concatenated = writer.concat()

      self.assertAllEqual(
          convert([[4.0, 5.0],
                   [104.0, 105.0],
                   [204.0, 205.0],
                   [6.0, 7.0],
                   [106.0, 107.0],
                   [8.0, 9.0]]), concatenated.eval())
def access_Measurement(lat, long, year):
    """Print the measurement nearest to (lat, long) for each monthly CCSM4
    tasmax file of the given year.

    Side effects only (prints); nothing is returned.
    """
    path_ccsm4 = '/Users/DavidKMYang/ClimateResearch/WBGT/ccsm4_tasmax_nepal/'

    os.chdir(path_ccsm4)
    file_names_ccsm4 = glob.glob("tasmax_" + str(year) + "*.mat")

    for fname in file_names_ccsm4:
        print(fname)
        mat = scipy.io.loadmat(path_ccsm4 + fname)
        # The variable name inside the .mat file matches the file name
        # minus its '.mat' extension.
        tempData = mat[fname[:-4]][0]

        # tempData[0]: latitude grid, tempData[1]: longitude grid,
        # tempData[2]: measurement grid -- assumed layout; TODO confirm.
        tempLatList = np.asarray([entry[0] for entry in tempData[0]])
        lat_index = find_nearest(tempLatList, lat)

        tempLongList = np.asarray(tempData[1][0])
        long_index = find_nearest(tempLongList, long)

        print(tempLatList[lat_index])
        print(tempLongList[long_index])
        print(tempData[2][lat_index][long_index])
        # BUG FIX: the original unconditionally called
        # access_Measurement(25, 30, 2001) here, inside the loop, causing
        # unbounded recursion.  The call was removed; invoke this function
        # from module level instead.
 def shared(data):
     """ Place the data into shared variables. This allows Theano to copy
     the data to the GPU, if one is available.
     """
     def to_shared(column):
         # Cast to Theano's configured float type; borrow avoids a copy.
         values = numpy.asarray(column.tolist(), dtype=theano.config.floatX)
         return theano.shared(values, borrow=True)

     shared_x = to_shared(data[:, 0])
     shared_y = to_shared(data[:, 1])
     return shared_x, T.cast(shared_y, "int32")
def test_cross_validator_with_default_indices():
    """For each cross-validator: check get_n_splits, 1d-data equivalence,
    and that the returned train/test indices are integer-typed."""
    n_samples = 4
    n_unique_labels = 4
    n_folds = 2
    p = 2
    n_iter = 10  # (the default value)

    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_folds)
    skf = StratifiedKFold(n_folds)
    lolo = LeaveOneLabelOut()
    lopo = LeavePLabelOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = np of unique folds = 2

    n_splits = [n_samples, comb(n_samples, p), n_folds, n_folds,
                n_unique_labels, comb(n_unique_labels, p), n_iter, 2]

    for i, cv in enumerate([loo, lpo, kf, skf, lolo, lopo, ss, ps]):
        # Test if get_n_splits works correctly
        assert_equal(n_splits[i], cv.get_n_splits(X, y, labels))

        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(list(cv.split(X, y, labels)),
                                list(cv.split(X_1d, y, labels)))
        # Test that train, test indices returned are integers
        for train, test in cv.split(X, y, labels):
            assert_equal(np.asarray(train).dtype.kind, 'i')
            # BUG FIX: this previously re-checked `train`; verify `test` too.
            assert_equal(np.asarray(test).dtype.kind, 'i')
Example #17
0
    def add_lines(self, levels, colors, linewidths, erase=True):
        """
        Draw lines on the colorbar.

        *colors* and *linewidths* must be scalars or
        sequences the same length as *levels*.

        Set *erase* to False to add lines without first
        removing any previously added lines.
        """
        # Map data levels to normalized colorbar coordinates.
        y = self._locate(levels)
        # Keep only levels that fall (just barely) inside the colorbar span.
        igood = (y < 1.001) & (y > -0.001)
        y = y[igood]
        # Filter per-level color/width sequences consistently with y;
        # scalars are passed through untouched.
        if cbook.iterable(colors):
            colors = np.asarray(colors)[igood]
        if cbook.iterable(linewidths):
            linewidths = np.asarray(linewidths)[igood]
        N = len(y)
        x = np.array([0.0, 1.0])
        X, Y = np.meshgrid(x, y)
        if self.orientation == "vertical":
            xy = [list(zip(X[i], Y[i])) for i in range(N)]
        else:
            # Horizontal colorbar: swap the roles of the two coordinates.
            xy = [list(zip(Y[i], X[i])) for i in range(N)]
        col = collections.LineCollection(xy, linewidths=linewidths)

        if erase and self.lines:
            # Remove previously drawn line collections before adding new ones.
            for lc in self.lines:
                lc.remove()
            self.lines = []
        self.lines.append(col)
        col.set_color(colors)
        self.ax.add_collection(col)
def test_stratified_shuffle_split_init():
    """StratifiedShuffleSplit must reject impossible class/size combinations."""
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # A class with a single sample cannot be stratified.
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.2).split(X, y))

    # Test set smaller than the number of classes.
    assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
    # Train set smaller than the number of classes.
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 3, 2).split(X, y))

    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Requested train + test sizes exceed the available samples.
    assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.6, 8).split(X, y))

    # Train size or test size too small
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(train_size=2).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(test_size=2).split(X, y))
def sample_every_two_correlation_times(energy_data, magnetization_data, correlation_time, no_of_sites):
    """Sample the given data every 2 correlation times and determine value and error."""
    step = 2 * int(np.ceil(correlation_time))
    sample_indices = np.arange(0, len(energy_data), step)

    magnet_samples = np.asarray([magnetization_data[t] for t in sample_indices])
    energy_samples = np.asarray([energy_data[t] for t in sample_indices])

    abs_magnetization = np.mean(np.absolute(magnet_samples))
    # NOTE(review): the error below is computed from the signed samples,
    # not their absolute values -- confirm this is intended.
    abs_magnetization_error = calculate_error(magnet_samples)
    print("<m> (<|M|/N>) = {0} +/- {1}".format(abs_magnetization, abs_magnetization_error))

    magnetization = np.mean(magnet_samples)
    magnetization_error = calculate_error(magnet_samples)
    print("<M/N> = {0} +/- {1}".format(magnetization, magnetization_error))

    energy = np.mean(energy_samples)
    energy_error = calculate_error(energy_samples)
    print("<E/N> = {0} +/- {1}".format(energy, energy_error))

    total_magnetization_sq = (magnet_samples * no_of_sites) ** 2
    magnetization_squared = np.mean(total_magnetization_sq)
    magnetization_squared_error = calculate_error(total_magnetization_sq)
    print("<M^2> = {0} +/- {1}".format(magnetization_squared, magnetization_squared_error))
Example #20
0
    def Draw(self, nrb=None, MeshColor=None, NurbsColor=None, PointsColor=None, alpha=ALPHA, blend=False):
        """Render the NURBS patches and, optionally, their control points.

        nrb         : when given, draw only this patch; otherwise draw every
                      patch in self._list
        MeshColor   : color forwarded to the evaluator for the mesh
        NurbsColor  : surface color; falls back to self.NurbsColor or the
                      theme's default patch color when None
        PointsColor : color used for the control points
        alpha       : transparency forwarded to the evaluator
        blend       : enable blending in the evaluator
        """
        if NurbsColor is None:
            if self.NurbsColor is None:
                # Fall back to the theme's default patch color.
                NurbsColor = list(asarray(Theme().color_viewer("default_patch")).copy())
            else:
                NurbsColor = list(asarray(self.NurbsColor).copy())
        if self.show:
            if nrb is not None:
                list_nrb = [nrb]
            else:
                list_nrb = self._list

            for i in range(0, len(list_nrb)):
                nrb = list_nrb[i]
                # NOTE(review): patch info is looked up by position; when a
                # single `nrb` is passed this pairs it with patch 0's info --
                # confirm that is intended.
                nrbInfo = self.list_patchInfo[i]
                if nrbInfo.show:
                    # A per-patch color overrides the global one when present.
                    _NurbsColor = asarray(NurbsColor).copy()
                    if nrbInfo.NurbsColor is not None:
                        _NurbsColor = asarray(nrbInfo.NurbsColor).copy()
                    NurbsSteps = nrbInfo.steps
                    evaluator = self.GetEvaluator(
                        nrb, MeshColor=MeshColor, NurbsColor=_NurbsColor, alpha=alpha, steps=NurbsSteps
                    )
                    showMesh = self.showMesh or nrbInfo.showMesh
                    evaluator.draw(mesh=showMesh, nurbs=True, blend=blend)
                if self.showPoints or nrbInfo.showPoints:
                    # Draw control points
                    self.DrawControlPoints(nrb, PointsColor=PointsColor, alpha=alpha, blend=blend)
def learn(tuned_parameters, model):
    """Grid-search `model` over `tuned_parameters` on the training data.

    Reads 'Data/<trainfile>' and 'Data/<testfile>' (module-level names),
    tunes with 5-fold cross-validation for precision and recall, and prints
    the best parameters, the grid scores, and a classification report.
    """
    # produceFeature(trainfile)
    dataset = genfromtxt(open('Data/'+trainfile,'r'), delimiter=',', dtype='f8')[0:]
    target = [row[0] for row in dataset]    # first column is the label
    train = [row[1:] for row in dataset]    # remaining columns are features

    # produceFeature(testfile)
    test = genfromtxt(open('Data/'+testfile,'r'), delimiter=',', dtype='f8')[0:]
    test_target = [row[1:] for row in test]

    # Turn the data into (samples, features) arrays.
    X = np.asarray(train)
    y = np.asarray(target)

    # Hold out half the data for the final evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0)

    for score in ['precision', 'recall']:
        print("# Tuning hyper-parameters for %s" % score)
        print()

        clf = GridSearchCV(model, tuned_parameters, cv=5,
                           scoring='%s_weighted' % score)
        clf.fit(X_train, y_train)

        print("Best parameters set found on development set:")
        print()
        print(clf.best_params_)
        print()
        print("Grid scores on development set:")
        print()
        # NOTE(review): grid_scores_ was removed in scikit-learn 0.20; newer
        # versions expose cv_results_ instead -- confirm the target version.
        for params, mean_score, scores in clf.grid_scores_:
            print("%0.3f (+/-%0.03f) for %r"
                  % (mean_score, scores.std() * 2, params))
        print()

        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print()
Example #22
0
def _threshold_brier_score_vectorized(observations, forecasts, thresholds):
    """Reference (vectorized) implementation of the threshold Brier score.

    Converts observations and forecasts into binary threshold-exceedance
    indicators (NaN-safe) and delegates to `brier_score`.  Forecasts may
    either match the observations' shape (deterministic) or carry one extra
    trailing 'realization' axis that is averaged into probabilities; any
    other shape raises AssertionError.
    """
    observations = np.asarray(observations)
    thresholds = np.asarray(thresholds)
    forecasts = np.asarray(forecasts)

    def exceedances(x):
        # NaN safe calculation of threshold exceedances
        # add an extra dimension to `x` and broadcast `thresholds` so that it
        # varies along that new dimension
        with suppress_warnings('invalid value encountered in greater'):
            exceeds = (x[..., np.newaxis] >
                       thresholds.reshape((1,) * x.ndim + (-1,))
                       ).astype(float)
        if x.ndim == 0 and np.isnan(x):
            # scalar NaN input: every threshold indicator is undefined
            exceeds[:] = np.nan
        else:
            # propagate NaNs from the input into the indicator array
            exceeds[np.where(np.isnan(x))] = np.nan
        return exceeds

    binary_obs = exceedances(observations)
    if observations.shape == forecasts.shape:
        # deterministic forecasts: indicators rather than probabilities
        prob_forecast = exceedances(forecasts)
    elif observations.shape == forecasts.shape[:-1]:
        # axis=-2 should be the 'realization' axis, after swapping that axes
        # to the end of forecasts and inserting one extra axis
        with suppress_warnings('Mean of empty slice'):
            prob_forecast = np.nanmean(exceedances(forecasts), axis=-2)
    else:
        raise AssertionError
    return brier_score(binary_obs, prob_forecast)
Example #23
0
 def test_layer_add(self):
     # Element-wise layer addition should yield 6 in every cell.
     total = self.layer6 + self.layer7
     as_array = np.asarray(total.get_nparray())
     expected = np.asarray([[6] * 3] * 3)
     self.assertEqual(np.all(as_array == 6), True)
     self.assertTrue(allequal(total._data, expected))
Example #24
0
def Seuil_var(img):
    """Compute an automatic threshold value from the image's histogram.

    The histogram is scanned past the bulk of the pixel mass (80 % of the
    background-corrected norm), then advanced while it is still falling
    steeply or too populated.  The threshold is the first index i with
    DH(i) >= 0, DH(i)/H(i-1) <= 0.1 and H(i) <= 0.01 * Norm, where
    DH(i) = H(i) - H(i-1).

    In : img : ipl Image : image to treated
    Out: seuil : Int : Value of the threshold (0 when no index qualifies)
    """
    dim = 255
    pixels = np.asarray(img[:])
    # Total pixel count, corrected below by removing the first bin
    # (treated as background).
    Norm = pixels.shape[0] * pixels.shape[1]
    bins = [float(x) for x in range(dim)]
    hist, bin_edges = np.histogram(pixels, bins)
    Norm = Norm - hist[0]

    # Skip forward until 80 % of the corrected pixel mass has been seen.
    i = 1
    som = 0
    while (som < 0.8 * Norm and i < len(hist) - 1):
        som = som + hist[i]
        i = i + 1

    # Advance while the histogram is still falling, rising too steeply,
    # or holding more than 1 % of the mass.
    while ((hist[i] - hist[i - 1] < 0
            or (hist[i] - hist[i - 1]) / hist[i - 1] > 0.1
            or hist[i] > 0.01 * Norm) and i < len(hist) - 1):
        i = i + 1

    # BUG FIX: the original set seuil = 0 when the scan ran off the end of
    # the histogram, but then unconditionally overwrote it with `seuil = i`.
    # Return the 0 sentinel in that case, as intended.  Dead locals
    # (MaxValue, scale, Wdim, median, mean, var) were removed.
    if i == len(hist) - 1:
        return 0
    return i
Example #25
0
 def test_layer_mul(self):
     # Element-wise layer multiplication should yield 5 in every cell.
     product = self.layer6 * self.layer7
     as_array = np.asarray(product.get_nparray())
     expected = np.asarray([[5] * 3] * 3)
     self.assertEqual(np.all(as_array == 5), True)
     self.assertTrue(allequal(product._data, expected))
def test_point_in_poly3(point):
    """
    tests points that should be in the polygon
    """
    pt = np.asarray(point, dtype=np.float64)
    # The point must be reported inside for both winding orders.
    assert point_in_poly(poly2_ccw, pt)
    assert point_in_poly(poly2_cw, pt)
Example #27
0
def compress(data):
    """
    Convert 4-byte integer value to semi-logarithmic 2-byte integer.

    The storage format for numbers up to 32767 is the number itself.
    Numbers above 32767 are stored as
         - (mantissa + 10000*10**power)
    where the mantissa has 4 digits and power is 1, 2 or 3.

    An extra integer is inserted at positions 0, 1022, 2*1022, ...
    """
    flat = numpy.asarray(data.flatten(), 'int32')
    assert len(flat) == 16384

    # Logarithmic compression of values too large for two bytes.
    base = 10000
    overflow = flat > 2767000    # beyond the representable range -> error code
    big = flat > 32767
    power = numpy.ceil(numpy.log10(flat[big])) - 4
    mantissa = flat[big] // (10 ** power)
    flat[big] = numpy.asarray(-(mantissa + power * base), flat.dtype)
    flat[overflow] = -777

    # Insert a zero marker every 1022 entries (positions 0, 1022, 2044, ...).
    padded = numpy.zeros(16384 + 17, 'i')
    keep = numpy.arange(len(padded), dtype='i') % 1022 != 0
    padded[keep] = flat
    return padded
Example #28
0
def estimate_transition_matrix(count_matrix):
    """
    Simple Maximum Likelihood estimator of transition matrix.

    Parameters
    ----------
    count_matrix : array or sparse matrix
        A square matrix of transition counts

    Returns
    -------
    tProb : array or sparse matrix
         Most likely transition matrix given `tCount`
    """
    # Normalise each row by its sum, leaving all-zero rows as zeros.
    # Neither branch mutates the caller's matrix, and both handle
    # integer as well as float counts.
    if scipy.sparse.isspmatrix(count_matrix):
        counts = scipy.sparse.csr_matrix(count_matrix).asfptype()
        row_sums = np.asarray(counts.sum(axis=1)).flatten()
        scale = np.zeros(len(row_sums))
        nonzero = row_sums != 0
        scale[nonzero] = 1.0 / row_sums[nonzero]
        # Row scaling via a diagonal matrix keeps the result sparse.
        diag = scipy.sparse.dia_matrix((scale, 0), counts.shape).tocsr()
        return diag.dot(counts)

    counts = np.asarray(count_matrix.astype(float))  # astype creates a copy
    row_sums = counts.sum(axis=1)
    scale = np.zeros(len(row_sums))
    nonzero = row_sums != 0
    scale[nonzero] = 1.0 / row_sums[nonzero]
    return counts * scale.reshape((row_sums.shape[0], 1))
Example #29
0
def resample(oldrate, newrate, x, n, dtype, factor):
    """Resample signal *x* from *oldrate* Hz to *newrate* Hz.

    Upsamples by zero-stuffing, antialias-filters with an FIR filter of
    order *n* (via the module-level `firfilter`), then decimates, scaling
    by *factor*.  Returns the resampled array with the requested dtype.
    """
    print("Resampling from", oldrate, "Hz to", newrate, "Hz, amplification factor", factor)
    rategcd = gcd(oldrate, newrate)
    # BUG FIX: use floor division so the rates and counts stay integers on
    # Python 3 (`/` would produce floats and break indexing/np.zeros below).
    uprate = newrate // rategcd
    dnrate = oldrate // rategcd

    oldcount = len(x)
    midcount = oldcount * uprate
    newcount = midcount // dnrate

    print("Upsampling by", uprate)
    if uprate == 1:
        yout = np.asarray(x, dtype=dtype)
    else:
        # Zero-stuff: one input sample every `uprate` slots, scaled by
        # `uprate` to preserve amplitude after lowpass filtering.
        yout = np.zeros(midcount, dtype=dtype)
        # BUG FIX: range(oldcount-1) silently dropped the last sample.
        for i in range(oldcount):
            yout[i * uprate] = x[i] * uprate

    wl = min(1.0 / uprate, 1.0 / dnrate)
    print("Antialias filtering at", wl)

    midrate = oldrate * uprate
    filt = firfilter(0, (midrate * wl) / 2.0, midrate, n)
    y = signal.lfilter(filt, 1, yout)

    print("Downsampling by", dnrate)
    if dnrate == 1:
        # NOTE(review): `factor` is not applied on this path -- confirm
        # whether that asymmetry is intentional.
        yout = np.asarray(y, dtype=dtype)
    else:
        yout = np.zeros(newcount, dtype=dtype)
        # BUG FIX: range(newcount-1) silently dropped the last sample.
        for i in range(newcount):
            yout[i] = y[i * dnrate] * factor

    return yout
Example #30
0
  def _testBasic(self, dtype):
    """One-hot with explicit on/off values, checked for axis=-1 and axis=0."""
    indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
    depth = 3
    on_value = np.asarray(1.0, dtype=dtype)
    off_value = np.asarray(-1.0, dtype=dtype)

    expected = np.asarray(
        [[1.0, -1.0, -1.0],
         [-1.0, -1.0, 1.0],
         [-1.0, -1.0, -1.0],
         [-1.0, 1.0, -1.0]],
        dtype=dtype)

    # Default axis (-1): one-hot vectors lie along the last dimension.
    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, dtype=dtype, truth=expected)

    # axis=0: the output is the transpose of the axis=-1 result.
    self._testBothOneHot(indices=indices, depth=depth, on_value=on_value,
                         off_value=off_value, axis=0, dtype=dtype,
                         truth=expected.T)