Example #1
    def compute_distances(self, x1, x2):
        """
        The method uses a function implemented in Cython. Data (`x1` and `x2`)
        is accompanied by two tables. One is a 2-d table in which elements of
        `x1` (`x2`) are replaced by 0's and 1's. The other is a vector
        indicating rows (or columns) with nan values.

        The function in Cython uses a fast loop without any conditions to
        compute distances between rows without missing values, and a slower
        loop for those with missing values.
        """
        nonzeros1 = np.not_equal(x1, 0).view(np.int8)
        if self.axis == 1:
            nans1 = _distance.any_nan_row(x1)
            if x2 is None:
                nonzeros2, nans2 = nonzeros1, nans1
            else:
                nonzeros2 = np.not_equal(x2, 0).view(np.int8)
                nans2 = _distance.any_nan_row(x2)
            return _distance.jaccard_rows(
                nonzeros1, nonzeros2,
                x1, x1 if x2 is None else x2,
                nans1, nans2,
                self.ps,
                x2 is not None)
        else:
            nans1 = _distance.any_nan_row(x1.T)
            return _distance.jaccard_cols(
                nonzeros1, x1, nans1, self.ps)
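
For context, a minimal NumPy sketch of the two helper tables the docstring mentions; the exact behaviour of `_distance.any_nan_row` is an assumption here (flag rows that contain any NaN).

import numpy as np

x1 = np.array([[1.0, 0.0, np.nan],
               [2.0, 3.0, 0.0]])

# 0/1 table marking nonzero entries, as passed to the Cython routine
nonzeros1 = np.not_equal(x1, 0).view(np.int8)

# assumed equivalent of _distance.any_nan_row: flag rows holding any NaN
nans1 = np.isnan(x1).any(axis=1).astype(np.int8)   # -> [1, 0]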
Example #2
def best_grid(wavelengths1, wavelengths2, key):
    """
    Return the best wavelength grid onto which to regrid the two input arrays.

    Considering the two wavelength grids passed as parameters, this function
    computes the best new grid that will be used to regrid the two spectra
    before combining them. We do not use np.unique as it is much slower than
    finding the unique elements by hand.

    Parameters
    ----------
    wavelengths1, wavelengths2: array of floats
        The wavelength grids to be 'regridded'.
    key: tuple
        Key under which the result is cached.

    Returns
    -------
    new_grid: array of floats
        Array containing all the wavelengths found in the input arrays.

    """

    if key in best_grid_cache:
        return best_grid_cache[key]
    wl = np.concatenate((wavelengths1, wavelengths2))
    wl.sort(kind='mergesort')
    flag = np.ones(len(wl), dtype=bool)
    np.not_equal(wl[1:], wl[:-1], out=flag[1:])
    best_grid_cache[key] = wl[flag]
    return wl[flag]
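
A minimal usage sketch, assuming `best_grid_cache` is a module-level dict as implied by the function body:

import numpy as np

best_grid_cache = {}   # assumed module-level cache used by best_grid

wl1 = np.array([1.0, 2.0, 3.0])
wl2 = np.array([2.0, 2.5, 4.0])
grid = best_grid(wl1, wl2, key=('spec_a', 'spec_b'))
# grid -> array([1., 2., 2.5, 3., 4.]); a second call with the same key
# returns the cached result without recomputing.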
Example #3
    def node_can_drain(self, the_node):
        """Check if a node has drainage away from the current lake/depression.

        Parameters
        ----------
        the_node : int
            The node to test.

        Returns
        -------
        boolean
            ``True`` if the node can drain. Otherwise, ``False``.
        """
        nbrs = self._node_nbrs[the_node]
        not_bad = nbrs != LOCAL_BAD_INDEX_VALUE
        not_too_high = self._elev[nbrs] < self._elev[the_node]
        not_current_lake = np.not_equal(self.flood_status[nbrs], _CURRENT_LAKE)
        not_flooded = np.not_equal(self.flood_status[nbrs], _FLOODED)
        all_probs = np.logical_and(
            np.logical_and(not_bad, not_too_high),
            np.logical_and(not_current_lake, not_flooded))
        if np.any(all_probs):
            return True
        else:
            return False
Example #4
def parseArgs(data, targetClass, otherClass = None, **args) :
    '''parse arguments for a feature scoring function'''

    if 'feature' in args :
        feature = args['feature']
    else :
        feature = None
    if 'Y' in args :
        Y = args['Y']
        if otherClass is None :
            otherI = numpy.nonzero(numpy.not_equal(Y, targetClass))[0]
        else :
            otherI = numpy.nonzero(numpy.equal(Y, otherClass))[0]
        targetClassSize = numpy.sum(numpy.equal(Y, targetClass))
    else :
        Y = None
        if otherClass is None :
            otherI = numpy.nonzero(numpy.not_equal(data.labels.Y, targetClass))[0]
        else :
            otherI = data.labels.classes[otherClass]
        targetClassSize = len(data.labels.classes[targetClass])
    
    otherClassSize = len(otherI)

    return Y, targetClassSize, otherClassSize, otherI, feature
Example #5
    def test_prelu_param_updates(self):
        x_train, _, y_train, _ = simple_classification()
        prelu_layer1 = layers.PRelu(20, alpha=0.25)
        prelu_layer2 = layers.PRelu(1, alpha=0.25)

        gdnet = algorithms.GradientDescent(
            [
                layers.Input(10),
                prelu_layer1,
                prelu_layer2,
            ]
        )

        prelu1_alpha_before_training = prelu_layer1.alpha.get_value()
        prelu2_alpha_before_training = prelu_layer2.alpha.get_value()

        gdnet.train(x_train, y_train, epochs=10)

        prelu1_alpha_after_training = prelu_layer1.alpha.get_value()
        prelu2_alpha_after_training = prelu_layer2.alpha.get_value()

        self.assertTrue(all(np.not_equal(
            prelu1_alpha_before_training,
            prelu1_alpha_after_training,
        )))
        self.assertTrue(all(np.not_equal(
            prelu2_alpha_before_training,
            prelu2_alpha_after_training,
        )))
Example #6
File: core.py Project: nidhog/dedupe
def scoreDuplicates(records, data_model, pool, threshold=0):

    record, records = peek(records)

    id_type = idType(record)
    
    score_dtype = [('pairs', id_type, 2), ('score', 'f4', 1)]

    record_chunks = grouper(records, 100000)

    scoring_function = ScoringFunction(data_model, 
                                       threshold,
                                       score_dtype)

    results = [pool.apply_async(scoring_function, (chunk,))
               for chunk in record_chunks]

    for r in results:
        r.wait()

    scored_pairs = numpy.concatenate([r.get() for r in results])

    scored_pairs.sort()
    flag = numpy.ones(len(scored_pairs), dtype=bool)
    numpy.not_equal(scored_pairs[1:], 
                    scored_pairs[:-1], 
                    out=flag[1:])

    return scored_pairs[flag]
Example #7
def average_without_padding(x, ids, padding_id, cuda=False, eps=1e-8):
    if cuda:
        mask = Variable(torch.from_numpy(
            np.not_equal(ids, padding_id).astype(int)[:, :, np.newaxis]
        )).float().cuda().permute(1, 2, 0).expand_as(x)
    else:
        mask = Variable(torch.from_numpy(
            np.not_equal(ids, padding_id).astype(int)[:, :, np.newaxis]
        )).float().permute(1, 2, 0).expand_as(x)
    s = torch.sum(x*mask, dim=2) / (torch.sum(mask, dim=2)+eps)
    return s
Example #8
def get_calipso_phase_inner(features, qual_min=CALIPSO_QUAL_VALUES['medium'],
                            max_layers=1, same_phase_in_top_three_lay=True):
    """
    Returns Calipso cloud phase.    
    Pixels with quality lower than *qual_min* are masked out.    
    Screen out pixels with more than *max_layers* layers.    
    """
    if same_phase_in_top_three_lay:
        phase1 = get_bits(features[:,0], CALIPSO_PHASE_BITS, shift=True)
        phase2 = get_bits(features[:,1], CALIPSO_PHASE_BITS, shift=True)
        phase3 = get_bits(features[:,2], CALIPSO_PHASE_BITS, shift=True)
        two_layer_pixels = features[:, 2] >1
        three_layer_pixels = features[:, 3] >1
        lay1_lay2_differ = np.logical_and(two_layer_pixels,
                                          np.not_equal(phase1, phase2))
        lay2_lay3_differ = np.logical_and(three_layer_pixels,
                                          np.not_equal(phase2, phase3))
        varying_phases_in_top_3lay = np.logical_or(lay1_lay2_differ,
                                                      lay2_lay3_differ)
    # Reduce to single layer, masking any multilayer pixels
    features = np.ma.array(features[:, 0],
                           mask=(features[:, max_layers:] > 1).any(axis=-1))
    if same_phase_in_top_three_lay:
        features = np.ma.array(features,                               
                                mask = varying_phases_in_top_3lay)
    phase = get_bits(features, CALIPSO_PHASE_BITS, shift=True)
    qual = get_bits(features, CALIPSO_QUAL_BITS, shift=True)    
    # Don't care about pixels with lower than *qual_min* quality
    return np.ma.array(phase, mask=qual < qual_min)
Example #9
def _calc_errors(truth, prediction, class_number=1):
    tp = np.sum(np.equal(truth,class_number)*np.equal(prediction,class_number))
    tn = np.sum(np.not_equal(truth,class_number)*np.not_equal(prediction,class_number))

    fp = np.sum(np.not_equal(truth,class_number)*np.equal(prediction,class_number))
    fn = np.sum(np.equal(truth,class_number)*np.not_equal(prediction,class_number))

    return tp, tn, fp, fn
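
A small worked example of the confusion counts, using hypothetical toy arrays:

import numpy as np

truth = np.array([1, 1, 0, 0, 1])
prediction = np.array([1, 0, 0, 1, 1])
tp, tn, fp, fn = _calc_errors(truth, prediction, class_number=1)
# tp=2, tn=1, fp=1, fn=1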
Example #10
def merge(a, b):
    # http://stackoverflow.com/questions/12427146/combine-two-arrays-and-sort
    c = np.concatenate((a, b))
    c.sort(kind='mergesort')
    flag = np.ones(len(c), dtype=bool)
    np.not_equal(c[1:], c[:-1], out=flag[1:])

    return c[flag]
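
A quick usage example of the sort-then-deduplicate idiom:

import numpy as np

a = np.array([1, 3, 5])
b = np.array([2, 3, 4])
merge(a, b)   # -> array([1, 2, 3, 4, 5]), duplicates removed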
Example #11
def oht_model( gw, oro, fsns, flns, shfl, lhfl ):
    """parameters; must be dimensioned as specified:
    gw   : gaussian weights (lat)
    oro  : orography data array (lat,lon)
      requires that lat and lon are attached coordinates of oro
      and that oro and the following variables are 2D arrays (lat,lon).
    fsns : net shortwave solar flux at surface (lat,lon)
    flns : net longwave flux at surface (lat,lon)
    shfl : sensible heat flux at surface (lat,lon)
    lhfl : latent heat flux at surface (lat,lon)
    """
    re = 6.371e6            # radius of earth
    coef = re**2/1.e15      # scaled by PW
    heat_storage = 0.3      # W/m^2 adjustment for ocean heat storage 

    nlat = oro.shape[0]
    nlon = oro.shape[1]
    dlon = 2.*pi/nlon       # dlon in radians
    lat = latAxis(oro)
    i65n = numpy.where( lat[:]>=65 )[0][0]   # assumes that lat[i+1]>lat[i]
    i65s = numpy.where( lat[:]<=-65 )[0][-1]  # assumes that lat[i+1]>lat[i]

    # get the mask for the ocean basins
    basins_mask = ocean_mask(oro)    # returns 2D array(lat,lon) 
    # compute net surface energy flux
    netflux = fsns-flns-shfl-lhfl-heat_storage

    # compute the net flux for the basins
    netflux_basin = numpy.ma.empty( (3,nlat,nlon) )
    netflux_basin[0,:,:] = netflux[:,:]
    netflux_basin[1,:,:] = netflux[:,:]
    netflux_basin[2,:,:] = netflux[:,:]
    netflux_basin[:,:,:] = numpy.ma.masked  # to make sure the mask array gets created
    netflux_basin._mask[0,:,:] = numpy.not_equal(basins_mask,1) # False on Pacific
    netflux_basin._mask[1,:,:] = numpy.not_equal(basins_mask,2) # False on Atlantic
    netflux_basin._mask[2,:,:] = numpy.not_equal(basins_mask,3) # False on Indian

    # sum flux over the longitudes in each basin
    heatflux = numpy.ma.sum( netflux_basin, axis=2 )

    # compute implied heat transport in each basin
    oft = cdms2.createVariable( numpy.ma.masked_all((4,nlat)) )
    oft.setAxisList( [cdms2.createAxis([0,1,2,3],id='basin number'),lat] )
    # These ! signs assign a name to a dimension of oft:
    #oft!0 = "basin number"   # 0:pacific, 1:atlantic, 2:indian, 3:total
    #oft!1 = "lat"

    for n in range(3):
        for j in range(i65n,i65s-1,-1):      #start sum at most northern point
            # ...assumes that lat[i+1]>lat[i]
            oft[n,j] = -coef*dlon*numpy.ma.sum( heatflux[n,j:i65n+1]*gw[j:i65n+1] )

    # compute total implied ocean heat transport at each latitude
    # as the sum over the basins at that latitude
    for j in range( i65n, i65s-1, -1 ):
        oft[3,j] = numpy.ma.sum( oft[0:3,j] )

    return oft       # 2D array(4,lat)
Example #12
def shrink_hyperrect(x0, x1, L, R):
    """
    
    """
    L_or_R = (x1 >= x0) #Modifications to R
    R[L_or_R] = x1[L_or_R]
    np.not_equal(L_or_R, True, L_or_R) #Modifications to L
    L[L_or_R] = x1[L_or_R]
    return L, R
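
A short worked example; note that L and R are modified in place:

import numpy as np

x0 = np.array([0.5, 0.5])   # current point
x1 = np.array([0.3, 0.8])   # rejected proposal
L = np.array([0.0, 0.0])
R = np.array([1.0, 1.0])
L, R = shrink_hyperrect(x0, x1, L, R)
# L -> [0.3, 0.0], R -> [1.0, 0.8]: each bound moves toward x1 on its side of x0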
Example #13
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = self.high
        weights = weights.copy()
        weights[selection] = 0.0

        numpy.greater_equal(q, self.low, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.underflow._numpy(data, subweights, shape)

        numpy.less(q, self.high, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.overflow._numpy(data, subweights, shape)

        if all(isinstance(value, Count) and value.transform is identity for value in self.values) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
            # Numpy defines histograms as including the upper edge of the last bin only, so drop that
            weights[q == self.high] = 0.0

            h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)

            for hi, value in zip(h, self.values):
                value.fill(None, float(hi))

        else:
            q = numpy.array(q, dtype=numpy.float64)
            numpy.subtract(q, self.low, q)
            numpy.multiply(q, self.num, q)
            numpy.divide(q, self.high - self.low, q)
            numpy.floor(q, q)
            q = numpy.array(q, dtype=int)

            for index, value in enumerate(self.values):
                numpy.not_equal(q, index, selection)
                subweights[:] = weights
                subweights[selection] = 0.0
                value._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
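
For clarity, a toy sketch of the slow-path binning arithmetic and the not_equal-based weight masking used above (the values here are hypothetical):

import numpy as np

low, high, num = 0.0, 10.0, 5
q = np.array([0.5, 3.2, 9.9])
weights = np.ones_like(q)

idx = np.floor((q - low) * num / (high - low)).astype(int)   # -> [0, 1, 4]
for index in range(num):
    outside = np.not_equal(idx, index)    # entries that do not fall in this bin
    subweights = weights.copy()
    subweights[outside] = 0.0             # only this bin's entries keep weight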
Example #14
    def _build_y(self, X, y, sample_weight, trim_duplicates=True):
        """Build the y_ IsotonicRegression."""
        check_consistent_length(X, y, sample_weight)
        X, y = [check_array(x, ensure_2d=False) for x in [X, y]]

        y = as_float_array(y)
        self._check_fit_data(X, y, sample_weight)

        # Determine increasing if auto-determination requested
        if self.increasing == 'auto':
            self.increasing_ = check_increasing(X, y)
        else:
            self.increasing_ = self.increasing

        # If sample_weight is passed, remove zero-weight values and clean
        # the order
        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)
            mask = sample_weight > 0
            X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
        else:
            sample_weight = np.ones(len(y))

        order = np.lexsort((y, X))
        X, y, sample_weight = [astype(array[order], np.float64, copy=False)
                               for array in [X, y, sample_weight]]
        unique_X, unique_y, unique_sample_weight = _make_unique(
            X, y, sample_weight)

        # Store _X_ and _y_ to maintain backward compat during the deprecation
        # period of X_ and y_
        self._X_ = X = unique_X
        self._y_ = y = isotonic_regression(unique_y, unique_sample_weight,
                                           self.y_min, self.y_max,
                                           increasing=self.increasing_)

        # Handle the left and right bounds on X
        self.X_min_, self.X_max_ = np.min(X), np.max(X)

        if trim_duplicates:
            # Remove unnecessary points for faster prediction
            keep_data = np.ones((len(y),), dtype=bool)
            # Aside from the 1st and last point, remove points whose y values
            # are equal to both the point before and the point after it.
            keep_data[1:-1] = np.logical_or(
                np.not_equal(y[1:-1], y[:-2]),
                np.not_equal(y[1:-1], y[2:])
            )
            return X[keep_data], y[keep_data]
        else:
            # The ability to turn off trim_duplicates is only used to make it
            # easier to unit test that removing duplicates in y does not have
            # any impact on the resulting interpolation function (besides
            # prediction speed).
            return X, y
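
A small worked example of the trim_duplicates mask: interior points are kept only if their y value differs from the neighbour on at least one side.

import numpy as np

y = np.array([1., 1., 1., 2., 2., 3.])
keep_data = np.ones(len(y), dtype=bool)
keep_data[1:-1] = np.logical_or(np.not_equal(y[1:-1], y[:-2]),
                                np.not_equal(y[1:-1], y[2:]))
# keep_data -> [ True, False,  True,  True,  True,  True]
# only the middle of the flat run (index 1) is dropped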
Example #15
def LabelPerimeter(L, Connectivity=4):
    """Converts a label or binary mask image to a binary perimeter image.

    Uses 4-neighbor or 8-neighbor shifts to detect pixels whose values do
    not agree with their neighbors.

    Parameters
    ----------
    L : array_like
        A label or binary mask image.
    Connectivity : double or int
        Neighborhood connectivity to evaluate. Valid values are 4 or 8.
        Default value = 4.

    Returns
    -------
    Mask : array_like
        A binary image where object perimeter pixels have value 1, and
        non-perimeter pixels have value 0.

    See Also
    --------
    EmbedBounds
    """

    # initialize temporary variable
    Mask = np.zeros(L.shape)
    Temp = np.zeros(L.shape)

    # check left-right neighbors
    Temp[:, 0:-2] = np.not_equal(L[:, 0:-2], L[:, 1:-1])
    Temp[:, 1:-1] = np.logical_or(Temp[:, 1:-1], Temp[:, 0:-2])
    Mask = np.logical_or(Mask, Temp)

    # check up-down neighbors
    Temp[0:-2, :] = np.not_equal(L[0:-2, :], L[1:-1, :])
    Temp[1:-1, :] = np.logical_or(Temp[1:-1, :], Temp[0:-2, :])
    Mask = np.logical_or(Mask, Temp)

    # additional calculations if Connectivity == 8
    if(Connectivity == 8):

        # slope 1 diagonal shift
        Temp[1:-1, 0:-2] = np.not_equal(L[0:-2, 1:-2], L[1:-1, 0:-2])
        Temp[0:-2, 1:-1] = np.logical_or(Temp[0:-2, 1:-1], Temp[1:-1, 0:-2])
        Mask = np.logical_or(Mask, Temp)

        # slope -1 diagonal shift
        Temp[1:-1, 1:-1] = np.not_equal(L[0:-2, 0:-2], L[1:-1, 1:-1])
        Temp[0:-2, 0:-2] = np.logical_or(Temp[0:-2, 0:-2], Temp[1:-1, 1:-1])
        Mask = np.logical_or(Mask, Temp)

    # generate label-valued output
    return Mask.astype(np.uint32) * L
Example #16
def fact(x):
    p = equal(x, 0)
    k = add(x, p)
    i = not_equal(k, 1).astype(int)
    z = 1
    while i.any().astype(bool):

        z = multiply(z, k)
        subtract(k, i, k)
        i = not_equal(k, 1).astype(int)

    return z
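
The bare ufunc names above imply a module-level import along the lines of from numpy import equal, add, not_equal, multiply, subtract. A quick check of the vectorised factorial (zeros are replaced by 1 before the loop, so 0! = 1):

import numpy as np
from numpy import equal, add, not_equal, multiply, subtract

fact(np.array([0, 1, 5]))   # -> array([  1,   1, 120])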
Example #17
    def getModelData(model, air, selected_mogs, type1, type2=""):
        data = np.array([])
        type2 = ""

        tt = np.array([])
        et = np.array([])
        in_vect = np.array([])
        mogs = []
        for i in selected_mogs:
            mogs.append(model.mogs[i])

        if type1 == "tt":
            fac_dt = 1

            mog = mogs[0]
            ind = np.not_equal(mog.tt, -1).T
            tt, t0 = mog.getCorrectedTravelTimes(air)
            tt = tt.T
            et = fac_dt * mog.f_et * mog.et.T
            in_vect = mog.in_vect.T
            no = np.arange(mog.data.ntrace).T

            if len(mogs) > 1:
                for n in range(1, len(model.mogs)):
                    mog = mogs[n]
                    ind = np.concatenate((ind, np.not_equal(mog.tt, -1).T), axis=0)
                    tt = np.concatenate((tt, mog.getCorrectedTravelTimes(air)[0].T), axis=0)
                    et = np.concatenate((et, fac_dt * mog.et * mog.f_et.T), axis=0)
                    in_vect = np.concatenate((in_vect, mog.in_vect.T), axis=0)
                    no = np.concatenate((no, np.arange(mog.ntrace + 1).T), axis=0)

            ind = np.equal((ind.astype(int) + in_vect.astype(int)), 2)

            data = np.array([tt[ind], et[ind], no[ind]]).T

            return data, ind

        if type2 == "depth":
            data, ind = getModelData(model, air, selected_mogs, type1)  # @UndefinedVariable
            mog = mogs[0]
            tt = mog.Tx_z_orig.T
            et = mog.Rx_z_orig.T
            in_vect = mog.in_vect.T
            if len(mogs) > 1:
                for n in range(1, len(mogs)):
                    tt = np.concatenate((tt, mogs[n].Tx_z_orig.T), axis=0)
                    et = np.concatenate((et, mogs[n].Rx_z_orig.T), axis=0)
                    in_vect = np.concatenate((in_vect, mogs[n].in_vect.T), axis=0)

            ind = np.equal((ind.astype(int) + in_vect.astype(int)), 2)
            data = np.array([tt[ind], et[ind], no[ind]]).T
            return data, ind
Example #18
 def step_callback(*args, **kwargs):
     nonlocal model, optimiser, context, w, b, var, call_count
     context.optimiser_updated = False
     mon.update_optimiser(context, *args, **kwargs)
     w_new, b_new, var_new = model.enquire_session().run([model.w.unconstrained_tensor,
                                                          model.b.unconstrained_tensor,
                                                          model.var.unconstrained_tensor])
     self.assertTrue(np.alltrue(np.not_equal(w, w_new)))
     self.assertTrue(np.alltrue(np.not_equal(b, b_new)))
     self.assertTrue(np.alltrue(np.not_equal(var, var_new)))
     self.assertTrue(context.optimiser_updated)
     call_count += 1
     w, b, var = w_new, b_new, var_new
Example #19
def svm_bench():
    data_file = "./data/dataset.pkl"
    train_set, valid_set, test_set, word2id, pop2id, type2id = dataset.load_data(data_file)

    train_set_x, train_set_y = train_set
    train_set_pop_y, train_set_type_y, train_set_loc_y = train_set_y

    valid_set_x, valid_set_y = valid_set
    valid_set_pop_y, valid_set_type_y, valid_set_loc_y = valid_set_y
    
    test_set_x, test_set_y = test_set
    test_set_pop_y, test_set_type_y, test_set_loc_y = test_set_y
    
    id2word = {v:k for k,v in word2id.items()}
    word_train_set_x = [sen_dig2word(doc, id2word) for doc in train_set_x]
    word_valid_set_x = [sen_dig2word(doc, id2word) for doc in valid_set_x]
    word_test_set_x = [sen_dig2word(doc, id2word) for doc in test_set_x]
    
    # construct the word count matrix
    count_vect = CountVectorizer()
    x_train_count = count_vect.fit_transform(word_train_set_x)
    x_valid_count = count_vect.transform(word_valid_set_x)
    x_test_count = count_vect.transform(word_test_set_x)

    tfidf_transformer = TfidfTransformer()
    x_train_tfidf = tfidf_transformer.fit_transform(x_train_count)
    x_valid_tfidf = tfidf_transformer.transform(x_valid_count)
    x_test_tfidf = tfidf_transformer.transform(x_test_count)

    # train the pop model
    pop_clf = svm.LinearSVC().fit(x_train_tfidf, train_set_pop_y)
    pop_pred = pop_clf.predict(x_valid_tfidf)
    pop_pred_test = pop_clf.predict(x_test_tfidf)

    # compute the performance
    pop_errors = np.mean(np.not_equal(pop_pred, valid_set_pop_y))
    pop_errors_test = np.mean(np.not_equal(pop_pred_test, test_set_pop_y))

    # train the event type model
    type_clf = svm.LinearSVC().fit(x_train_tfidf, train_set_type_y)
    type_pred = type_clf.predict(x_valid_tfidf)
    type_pred_test = type_clf.predict(x_test_tfidf)

    # compute the performance
    type_errors = np.mean(np.not_equal(type_pred, valid_set_type_y))
    type_errors_test = np.mean(np.not_equal(type_pred_test, test_set_type_y))

    print "SVM Valid--> Type error: %0.2f, Popuation error: %0.2f" % (type_errors, pop_errors)
    print "SVM Tes--> Type error: %0.2f, Popuation error: %0.2f" % (type_errors_test, pop_errors_test)
Example #20
 def test_simplify(self):
     origEdgeImages = dm.load_rasters_from_dir('../images/manual/edges')
     redr_edge_images = []
     simp_edge_images = []
     edgeSets = []
     simp_edge_sets = []
     for img in origEdgeImages:
         edges = ch.chain(img)
         edgeSets.append(edges)
         simplified_set = []
         edge_image = np.zeros_like(img)
         simp_edge_image = np.zeros_like(img)
         for chain in edges:
             edge = et.Edge(chain)
             edge.draw(edge_image, 255)
             simp_chain = ch.simplify_chain(chain,1)
             simp_edge = et.Edge(simp_chain)
             simp_edge.draw(simp_edge_image, 255)
             simplified_set.append(simp_chain)
         redr_edge_images.append(edge_image)
         simp_edge_images.append(simp_edge_image)
         simp_edge_sets.append(simplified_set)
         redr_matches_orig = np.array_equal(img, edge_image)
         if not redr_matches_orig:
             bad_pixels = np.argwhere(np.not_equal(img,edge_image).astype(np.uint8))
             copy = cv2.cvtColor(np.copy(img),cv2.COLOR_GRAY2BGR)
             for pixel in bad_pixels:
                 copy[pixel[0],pixel[1]] = (0,0,255)
             for chain in edges:
                 for pt in chain:
                     copy[pt[0],pt[1]] = (255,128,2)
             cv2.imwrite('bad_pixels.png',copy)
         self.assertTrue(redr_matches_orig,
                         "The redrawn edge image does not match the original image."+
                         " Percentage of unmatched pixels: %f" % 
                         (float(np.not_equal(img,edge_image).sum())/img.size))
         simp_matches_orig = np.array_equal(img, simp_edge_image) 
         if not simp_matches_orig:
             bad_pixels = np.argwhere(np.not_equal(img,simp_edge_image).astype(np.uint8))
             copy = cv2.cvtColor(np.copy(img),cv2.COLOR_GRAY2BGR)
             for pixel in bad_pixels:
                 if(simp_edge_image[pixel[0],pixel[1]] != 0):
                     copy[pixel[0],pixel[1]] = (0,0,255)
             for simp_chain in simplified_set:
                 for pt in simp_chain:
                     copy[pt[0],pt[1]] = (255,128,2)
             cv2.imwrite('bad_pixels.png',copy)    
         self.assertTrue(simp_matches_orig,
                         "The simplified edge image does not match the original image."+
                         " Percentage of unmatched pixels: %f" % 
                         (float(np.not_equal(img,simp_edge_image).sum())/img.size))
Example #21
 def test_visiting_stepping(self):
     lu = list(zip(*self.ld_bounds))
     lower = np.array(lu[0])
     upper = np.array(lu[1])
     dim = lower.size
     vd = VisitingDistribution(lower, upper, self.qv, self.rs)
     values = np.zeros(dim)
     x_step_low = vd.visiting(values, 0, self.high_temperature)
     # Make sure that only the first component is changed
     assert_equal(np.not_equal(x_step_low, 0), True)
     values = np.zeros(dim)
     x_step_high = vd.visiting(values, dim, self.high_temperature)
     # Make sure that component other than at dim has changed
     assert_equal(np.not_equal(x_step_high[0], 0), True)
Example #22
    def test_acceptRejectMove(self, blues_sim, state_keys, caplog):
        # Check positions are different from stepNCMC
        md_state = BLUESSimulation.getStateFromContext(blues_sim._md_sim.context, state_keys)
        ncmc_state = BLUESSimulation.getStateFromContext(blues_sim._ncmc_sim.context, state_keys)
        assert np.not_equal(md_state['positions'], ncmc_state['positions']).all()

        caplog.set_level(logging.INFO)
        blues_sim._acceptRejectMove()
        ncmc_state = BLUESSimulation.getStateFromContext(blues_sim._ncmc_sim.context, state_keys)
        md_state = BLUESSimulation.getStateFromContext(blues_sim._md_sim.context, state_keys)
        if 'NCMC MOVE ACCEPTED' in caplog.text:
            assert np.equal(md_state['positions'], ncmc_state['positions']).all()
        elif 'NCMC MOVE REJECTED' in caplog.text:
            assert np.not_equal(md_state['positions'], ncmc_state['positions']).all()
Example #23
def compute_image(nf, colorizer, region, antialiased = 3):
    A, B = region.A, region.B
    out = np.zeros((A, B, 3), dtype = float)

    ll = region.lowerleft()
    dx = region.dx()

    # -1 will be used for missing the target
    target  = np.zeros((A, B), dtype = int)
    steps   = np.zeros((A, B), dtype = float)

    # First, compute every pixel
    for i, j in region.pixels():
        # compute center of pixel
        z = ll + dx * (i + 1j * j)
        result = nf.converge(z)

        if result is None:
            target[i, j] = -1
            steps[i, j] = 0
        else:
            target[i, j] = result[0]
            steps[i, j] = result[1]

        out[i, j, :] = colorizer.color(result)

    if antialiased >= 2:
        aa = list(np.linspace(-1, 1, antialiased + 2)[1:-1])

        # Now, antialias the boundaries
        boundary = np.zeros((A, B), dtype = bool)
        boundary[1:, :]     |= np.not_equal(target[1:, :], target[:-1, :])
        boundary[:-1, :]    |= np.not_equal(target[1:, :], target[:-1, :])
        boundary[:, 1:]     |= np.not_equal(target[:, 1:], target[:, :-1])
        boundary[:, :-1]    |= np.not_equal(target[:, 1:], target[:, :-1])

        for i, j in region.pixels():
            if not boundary[i, j]:
                continue
            colors = []
            for i_ in aa:
                for j_ in aa:
                    z = ll + dx * (i + i_ + 1j * (j + j_))
                    result = nf.converge(z)
                    colors.append(colorizer.color(result))
            out[i, j, :] = colorizer.mean(colors)

    return colorizer.quantize(out)
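
A 1-D sketch of the boundary marking used before antialiasing: a pixel is flagged when it differs from either neighbour.

import numpy as np

target = np.array([0, 0, 1, 1, 2])
boundary = np.zeros(target.shape, dtype=bool)
boundary[1:]  |= np.not_equal(target[1:], target[:-1])
boundary[:-1] |= np.not_equal(target[1:], target[:-1])
# boundary -> [False,  True,  True,  True,  True]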
Example #24
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # switch to float here like in bin.py else numpy throws
        # TypeError on trivial integer cases such as:
        # >>> q = numpy.array([1,2,3,4])
        # >>> np.divide(q,1,q)
        # >>> np.floor(q,q)
        q = numpy.array(q, dtype=numpy.float64)
        neginfs = numpy.isneginf(q)
        posinfs = numpy.isposinf(q)

        numpy.subtract(q, self.origin, q)
        numpy.divide(q, self.binWidth, q)
        numpy.floor(q, q)
        q = numpy.array(q, dtype=numpy.int64)
        q[neginfs] = LONG_MINUSINF
        q[posinfs] = LONG_PLUSINF

        selected = q[weights > 0.0]

        selection = numpy.empty(q.shape, dtype=numpy.bool)
        for index in numpy.unique(selected):
            if index != LONG_NAN:
                bin = self.bins.get(index)
                if bin is None:
                    bin = self.value.zero()
                    self.bins[index] = bin

                numpy.not_equal(q, index, selection)
                subweights[:] = weights
                subweights[selection] = 0.0
                bin._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
Example #25
 def test_testUfuncs1 (self):
     "Test various functions such as sin, cos."
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     self.assertTrue (eq(numpy.cos(x), cos(xm)))
     self.assertTrue (eq(numpy.cosh(x), cosh(xm)))
     self.assertTrue (eq(numpy.sin(x), sin(xm)))
     self.assertTrue (eq(numpy.sinh(x), sinh(xm)))
     self.assertTrue (eq(numpy.tan(x), tan(xm)))
     self.assertTrue (eq(numpy.tanh(x), tanh(xm)))
     olderr = numpy.seterr(divide='ignore', invalid='ignore')
     try:
         self.assertTrue (eq(numpy.sqrt(abs(x)), sqrt(xm)))
         self.assertTrue (eq(numpy.log(abs(x)), log(xm)))
         self.assertTrue (eq(numpy.log10(abs(x)), log10(xm)))
     finally:
         numpy.seterr(**olderr)
     self.assertTrue (eq(numpy.exp(x), exp(xm)))
     self.assertTrue (eq(numpy.arcsin(z), arcsin(zm)))
     self.assertTrue (eq(numpy.arccos(z), arccos(zm)))
     self.assertTrue (eq(numpy.arctan(z), arctan(zm)))
     self.assertTrue (eq(numpy.arctan2(x, y), arctan2(xm, ym)))
     self.assertTrue (eq(numpy.absolute(x), absolute(xm)))
     self.assertTrue (eq(numpy.equal(x, y), equal(xm, ym)))
     self.assertTrue (eq(numpy.not_equal(x, y), not_equal(xm, ym)))
     self.assertTrue (eq(numpy.less(x, y), less(xm, ym)))
     self.assertTrue (eq(numpy.greater(x, y), greater(xm, ym)))
     self.assertTrue (eq(numpy.less_equal(x, y), less_equal(xm, ym)))
     self.assertTrue (eq(numpy.greater_equal(x, y), greater_equal(xm, ym)))
     self.assertTrue (eq(numpy.conjugate(x), conjugate(xm)))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, ym))))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((x, y))))
     self.assertTrue (eq(numpy.concatenate((x, y)), concatenate((xm, y))))
     self.assertTrue (eq(numpy.concatenate((x, y, x)), concatenate((x, ym, x))))
Example #26
 def test_testUfuncs1(self):
     # Test various functions such as sin, cos.
     (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
     assert_(eq(np.cos(x), cos(xm)))
     assert_(eq(np.cosh(x), cosh(xm)))
     assert_(eq(np.sin(x), sin(xm)))
     assert_(eq(np.sinh(x), sinh(xm)))
     assert_(eq(np.tan(x), tan(xm)))
     assert_(eq(np.tanh(x), tanh(xm)))
     with np.errstate(divide='ignore', invalid='ignore'):
         assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
         assert_(eq(np.log(abs(x)), log(xm)))
         assert_(eq(np.log10(abs(x)), log10(xm)))
     assert_(eq(np.exp(x), exp(xm)))
     assert_(eq(np.arcsin(z), arcsin(zm)))
     assert_(eq(np.arccos(z), arccos(zm)))
     assert_(eq(np.arctan(z), arctan(zm)))
     assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
     assert_(eq(np.absolute(x), absolute(xm)))
     assert_(eq(np.equal(x, y), equal(xm, ym)))
     assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
     assert_(eq(np.less(x, y), less(xm, ym)))
     assert_(eq(np.greater(x, y), greater(xm, ym)))
     assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
     assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
     assert_(eq(np.conjugate(x), conjugate(xm)))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
     assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
     assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
     assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
Example #27
 def __ne__(self, other):
     try:
         return np.not_equal(self, other)
     except Exception as exc:
         if isinstance(other, Quantity):
             raise exc
         return True
Example #28
  def test_get_speech_features_from_file_augmentation(self):
    augmentation = {
      'time_stretch_ratio': 0.0,
      'noise_level_min': -90,
      'noise_level_max': -46,
    }
    filename = 'open_seq2seq/test_utils/toy_speech_data/wav_files/46gc040q.wav'
    num_features = 161
    input_features_clean = get_speech_features_from_file(
      filename, num_features, augmentation=None,
    )
    input_features_augm = get_speech_features_from_file(
      filename, num_features, augmentation=augmentation,
    )
    # just checking that result is different with and without augmentation
    self.assertTrue(np.all(np.not_equal(input_features_clean,
                                        input_features_augm)))

    augmentation = {
      'time_stretch_ratio': 0.2,
      'noise_level_min': -90,
      'noise_level_max': -46,
    }
    input_features_augm = get_speech_features_from_file(
      filename, num_features, augmentation=augmentation,
    )
    self.assertNotEqual(
      input_features_clean.shape[0],
      input_features_augm.shape[0],
    )
    self.assertEqual(
      input_features_clean.shape[1],
      input_features_augm.shape[1],
    )
Example #29
def show_overlay(img3d, cc3d, ncc=10, s=85, xyz = 'xy',alpha=.8):
    """Shows the connected components overlayed over img3d

    Input
    ======
    img3d -- 3d array
    cc3d -- 3d array ( preferably of same shape as img3d, use get_3d_cc(...) )
    ncc -- where to cut off the color scale
    s -- slice to show
    xyz -- which projection to use in {'xy','xz','yz'}
    """
    cc = get_slice(cc3d,s,xyz)
    img = get_slice(img3d,s,xyz)

    notcc = np.isnan(cc)
    incc = np.not_equal(notcc,True)

    img4 = plt.cm.gray(img/np.nanmax(img))
    if ncc is not np.Inf:
        cc = plt.cm.jet(cc/float(ncc))
    else:
        cc = plt.cm.jet(np.log(cc)/np.log(np.nanmax(cc)))

    cc[notcc,:]=img4[notcc,:]
    cc[incc,3] = 1-img[incc]/(2*np.nanmax(img))

    plt.imshow(cc)
Example #30
def db_spline(y):
    ''' Background subtraction by fitting spline to the baseline.

    Arguments:
    y -- input array, 1D np.array

    Returns:
    y_db -- debaselined array, 1D np.array
    '''

    # Because of the discharge disturbance, the background does not exactly
    # match the signal. This creates a curved baseline after background
    # subtraction. Try to fit a B-spline to the baseline.
    x = np.arange(len(y)) / (len(y)-1)
    # Construct weight
    weight = weight_spline(y)
    w_nonzero = np.not_equal(weight, 0)
    # Interpolate spline
    spline = interpolate.UnivariateSpline(x, y, w=weight, k=5)
    # Remove spline
    y_db = y - spline(x)
    # Remove linear feature
    ppoly = np.polyfit(x[w_nonzero], y_db[w_nonzero], 1)
    y_db = y_db - np.polyval(ppoly, x)

    return y_db
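
A self-contained sketch of the baseline-removal idea, with uniform weights standing in for the project-specific weight_spline helper:

import numpy as np
from scipy import interpolate

y = 0.05 * np.sin(np.linspace(0, 6, 200)) + np.linspace(0, 1, 200)
x = np.arange(len(y)) / (len(y) - 1)
spline = interpolate.UnivariateSpline(x, y, w=np.ones_like(y), k=5)
y_db = y - spline(x)   # residual signal after subtracting the fitted baseline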
Example #31
def read_data(filename):
    """
    Reads an Intan Technologies RHD2000 data file generated by the evaluation board GUI.
    Returns data in a dictionary in raw sample units

    Adapted from Intan sample code...
    Michael Gibson 17 July 2015
    Modified Adrian Foy Sep 2018
    """

    tic = time.time()
    fid = open(filename, 'rb')
    filesize = os.path.getsize(filename)

    header = read_header(fid)

    print('{} amplifier channels'.format(header['num_amplifier_channels']))
    print('{} auxiliary input channels'.format(
        header['num_aux_input_channels']))
    print('{} supply voltage channels'.format(
        header['num_supply_voltage_channels']))
    print('{} board ADC channels'.format(header['num_board_adc_channels']))
    print('{} board digital input channels'.format(
        header['num_board_dig_in_channels']))
    print('{} board digital output channels'.format(
        header['num_board_dig_out_channels']))
    print('{} temperature sensors channels'.format(
        header['num_temp_sensor_channels']))

    # Determine how many samples the data file contains.
    bytes_per_block = get_bytes_per_data_block(header)

    # How many data blocks remain in this file?
    data_present = False
    bytes_remaining = filesize - fid.tell()
    if bytes_remaining > 0:
        data_present = True

    if bytes_remaining % bytes_per_block != 0:
        raise Exception(
            'File size wrong: should have a whole number of data blocks')

    num_data_blocks = int(bytes_remaining / bytes_per_block)

    num_amplifier_samples = header[
        'num_samples_per_data_block'] * num_data_blocks
    num_aux_input_samples = int(
        (header['num_samples_per_data_block'] / 4) * num_data_blocks)
    num_supply_voltage_samples = 1 * num_data_blocks
    num_board_adc_samples = header[
        'num_samples_per_data_block'] * num_data_blocks
    num_board_dig_in_samples = header[
        'num_samples_per_data_block'] * num_data_blocks
    num_board_dig_out_samples = header[
        'num_samples_per_data_block'] * num_data_blocks

    record_time = num_amplifier_samples / header['sample_rate']

    if data_present:
        print(
            'File contains {:0.3f} seconds of data.  Amplifiers sampled at {:0.2f} kS/s.'
            .format(record_time, header['sample_rate'] / 1000))
    else:
        print('Header contains no data.  Amplifiers sampled at {:0.2f} kS/s.'.
              format(header['sample_rate'] / 1000))

    if data_present:
        # Pre-allocate memory for data.
        print('')
        print('Allocating memory for data...')

        data = {}

        data['num_amplifier_samples'] = num_amplifier_samples

        if (header['version']['major'] == 1 and header['version']['minor'] >= 2
            ) or (header['version']['major'] > 1):
            data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.int)
        else:
            data['t_amplifier'] = np.zeros(num_amplifier_samples,
                                           dtype=np.uint)

        # NOTE: Changed from uint to uint16
        data['amplifier_data'] = np.zeros(
            [header['num_amplifier_channels'], num_amplifier_samples],
            dtype=np.uint16)
        data['aux_input_data'] = np.zeros(
            [header['num_aux_input_channels'], num_aux_input_samples],
            dtype=np.uint)
        data['supply_voltage_data'] = np.zeros([
            header['num_supply_voltage_channels'], num_supply_voltage_samples
        ],
                                               dtype=np.uint)
        data['temp_sensor_data'] = np.zeros(
            [header['num_temp_sensor_channels'], num_supply_voltage_samples],
            dtype=np.uint)
        data['board_adc_data'] = np.zeros(
            [header['num_board_adc_channels'], num_board_adc_samples],
            dtype=np.uint)

        # by default, this script interprets digital events (digital inputs and outputs) as booleans
        # if unsigned int values are preferred(0 for False, 1 for True),
        # replace the 'dtype=np.bool' argument with 'dtype=np.uint' as shown
        # the commented line below illustrates this for digital input data;
        # the same can be done for digital out

        # data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'],
        #                                       num_board_dig_in_samples], dtype=np.uint)
        data['board_dig_in_data'] = np.zeros(
            [header['num_board_dig_in_channels'], num_board_dig_in_samples],
            dtype=np.bool)
        data['board_dig_in_raw'] = np.zeros(num_board_dig_in_samples,
                                            dtype=np.uint)

        data['board_dig_out_data'] = np.zeros(
            [header['num_board_dig_out_channels'], num_board_dig_out_samples],
            dtype=np.bool)
        data['board_dig_out_raw'] = np.zeros(num_board_dig_out_samples,
                                             dtype=np.uint)

        # Read sampled data from file.
        print('Reading data from file...')

        # Initialize indices used in looping
        indices = {}
        indices['amplifier'] = 0
        indices['aux_input'] = 0
        indices['supply_voltage'] = 0
        indices['board_adc'] = 0
        indices['board_dig_in'] = 0
        indices['board_dig_out'] = 0

        print_increment = 10
        percent_done = print_increment
        for i in range(num_data_blocks):
            read_one_data_block(data, header, indices, fid)

            # Increment indices
            indices['amplifier'] += header['num_samples_per_data_block']
            indices['aux_input'] += int(header['num_samples_per_data_block'] /
                                        4)
            indices['supply_voltage'] += 1
            indices['board_adc'] += header['num_samples_per_data_block']
            indices['board_dig_in'] += header['num_samples_per_data_block']
            indices['board_dig_out'] += header['num_samples_per_data_block']

            fraction_done = 100 * (1.0 * i / num_data_blocks)
            if fraction_done >= percent_done:
                print('.')
                #                 print('{}% done...'.format(percent_done))
                percent_done = percent_done + print_increment

        # Make sure we have read exactly the right amount of data.
        bytes_remaining = filesize - fid.tell()
        if bytes_remaining != 0:
            raise Exception('Error: End of file not reached.')

    # Close data file.
    fid.close()

    if (data_present):
        print('Parsing data...')

        # Extract digital input channels to separate variables.
        for i in range(header['num_board_dig_in_channels']):
            data['board_dig_in_data'][i, :] = np.not_equal(
                np.bitwise_and(
                    data['board_dig_in_raw'],
                    (1 << header['board_dig_in_channels'][i]['native_order'])),
                0)

        # Extract digital output channels to separate variables.
        for i in range(header['num_board_dig_out_channels']):
            data['board_dig_out_data'][i, :] = np.not_equal(
                np.bitwise_and(data['board_dig_out_raw'], (
                    1 << header['board_dig_out_channels'][i]['native_order'])),
                0)

        # Scale voltage levels appropriately.
        # NOTE: Commented out to reduce size of file by 4x by storing
        # 16 bit ints in the resulting .npy
        # data['amplifier_data'] = np.multiply(
        #     0.195, (data['amplifier_data'].astype(np.int32) - 32768)) # units = microvolts
        data['aux_input_data'] = np.multiply(
            37.4e-6, data['aux_input_data'])  # units = volts
        data['supply_voltage_data'] = np.multiply(
            74.8e-6, data['supply_voltage_data'])  # units = volts
        if header['eval_board_mode'] == 1:
            data['board_adc_data'] = np.multiply(
                152.59e-6, (data['board_adc_data'].astype(np.int32) -
                            32768))  # units = volts
        elif header['eval_board_mode'] == 13:
            data['board_adc_data'] = np.multiply(
                312.5e-6, (data['board_adc_data'].astype(np.int32) -
                           32768))  # units = volts
        else:
            data['board_adc_data'] = np.multiply(
                50.354e-6, data['board_adc_data'])  # units = volts
        data['temp_sensor_data'] = np.multiply(
            0.01, data['temp_sensor_data'])  # units = deg C

        # Check for gaps in timestamps.
        num_gaps = np.sum(
            np.not_equal(data['t_amplifier'][1:] - data['t_amplifier'][:-1],
                         1))
        assert num_gaps == 0  # We don't handle missing samples in all our downstream analysis

        # Scale time steps (units = seconds).
        data['t_amplifier'] = data['t_amplifier'] / header['sample_rate']
        data['t_aux_input'] = data['t_amplifier'][range(
            0, len(data['t_amplifier']), 4)]
        data['t_supply_voltage'] = data['t_amplifier'][range(
            0, len(data['t_amplifier']), header['num_samples_per_data_block'])]
        data['t_board_adc'] = data['t_amplifier']
        data['t_dig'] = data['t_amplifier']
        data['t_temp_sensor'] = data['t_supply_voltage']

        # If the software notch filter was selected during the recording, apply the
        # same notch filter to amplifier data here.
        # assert header['notch_filter_frequency'] == 0
        if header['notch_filter_frequency'] > 0:
            print('Applying notch filter...')

        # print_increment = 10
        # percent_done = print_increment
        # for i in range(header['num_amplifier_channels']):
        #     data['amplifier_data'][i, :] = notch_filter(
        #         data['amplifier_data'][i, :], header['sample_rate'],
        #         header['notch_filter_frequency'], 10)
        #     fraction_done = 100 * (i / header['num_amplifier_channels'])
        #     if fraction_done >= percent_done:
        #         print('{}% done...'.format(percent_done))
        #         percent_done += print_increment
    else:
        data = []

    # Move variables to result struct.
    result = data_to_result(header, data, data_present)

    # Add back in num samples for use in the load_experiment function
    result['num_amplifier_samples'] = data['num_amplifier_samples']
    print('num_amplifier_samples', result['num_amplifier_samples'])

    print('Done!  Elapsed time: {0:0.1f} seconds'.format(time.time() - tic))
    return result
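
The digital-channel extraction above boils down to testing one bit of the raw word per channel; a tiny illustration with a hypothetical native_order of 1:

import numpy as np

board_dig_in_raw = np.array([0b0000, 0b0010, 0b0011])
native_order = 1
channel = np.not_equal(np.bitwise_and(board_dig_in_raw, 1 << native_order), 0)
# channel -> [False,  True,  True]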
Example #32
def _ConstantValue(tensor, partial):
    # TODO(touts): Support Variables?
    if not isinstance(tensor, ops.Tensor):
        raise TypeError("tensor is not a Tensor")
    if tensor.op.type == "Const":
        return MakeNdarray(tensor.op.get_attr("value"))
    elif tensor.op.type == "Shape":
        input_shape = tensor.op.inputs[0].get_shape()
        if input_shape.is_fully_defined():
            return np.array([dim.value for dim in input_shape.dims],
                            dtype=tensor.dtype.as_numpy_dtype)
        else:
            return None
    elif tensor.op.type == "Size":
        input_shape = tensor.op.inputs[0].get_shape()
        if input_shape.is_fully_defined():
            return np.prod([dim.value for dim in input_shape.dims],
                           dtype=np.int32)
        else:
            return None
    elif tensor.op.type == "Rank":
        input_shape = tensor.op.inputs[0].get_shape()
        if input_shape.ndims is not None:
            return np.ndarray(shape=(),
                              buffer=np.array([input_shape.ndims],
                                              dtype=np.int32),
                              dtype=np.int32)
        else:
            return None
    elif tensor.op.type == "Range":
        start = constant_value(tensor.op.inputs[0])
        if start is None:
            return None
        limit = constant_value(tensor.op.inputs[1])
        if limit is None:
            return None
        delta = constant_value(tensor.op.inputs[2])
        if delta is None:
            return None
        return np.arange(start,
                         limit,
                         delta,
                         dtype=tensor.dtype.as_numpy_dtype)
    elif tensor.op.type == "Cast":
        pre_cast = constant_value(tensor.op.inputs[0])
        if pre_cast is None:
            return None
        cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
        return pre_cast.astype(cast_dtype.as_numpy_dtype)
    elif tensor.op.type == "Concat":
        dim = constant_value(tensor.op.inputs[0])
        if dim is None:
            return None
        values = []
        for x in tensor.op.inputs[1:]:
            value = constant_value(x)
            if value is None:
                return None
            values.append(value)
        return np.concatenate(values, axis=dim)
    elif tensor.op.type == "ConcatV2":
        dim = constant_value(tensor.op.inputs[-1])
        if dim is None:
            return None
        values = []
        for x in tensor.op.inputs[:-1]:
            value = constant_value(x)
            if value is None:
                return None
            values.append(value)
        return np.concatenate(values, axis=dim)
    elif tensor.op.type == "Pack":
        values = []
        # Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
        # and shouldn't be produced, but to deal sensibly with them here we check
        # and return None.
        if not tensor.op.inputs:
            return None
        # We can't handle axis != 0 Packs at the moment.
        if tensor.op.get_attr("axis") != 0:
            return None
        for x in tensor.op.inputs:
            value = constant_value(x, partial)
            if value is None and not partial:
                return None
            values.append(value)
        return np.array(values)
    elif tensor.op.type == "Fill":
        fill_shape = tensor.shape
        fill_value = constant_value(tensor.op.inputs[1])
        if fill_shape.is_fully_defined() and fill_value is not None:
            return np.full(fill_shape.as_list(),
                           fill_value,
                           dtype=fill_value.dtype)
        else:
            return None
    elif tensor.op.type == "Equal":
        value1 = constant_value(tensor.op.inputs[0])
        if value1 is None:
            return None
        value2 = constant_value(tensor.op.inputs[1])
        if value2 is None:
            return None
        return np.equal(value1, value2)
    elif tensor.op.type == "NotEqual":
        value1 = constant_value(tensor.op.inputs[0])
        if value1 is None:
            return None
        value2 = constant_value(tensor.op.inputs[1])
        if value2 is None:
            return None
        return np.not_equal(value1, value2)
    else:
        return None
Example #33
def get_exist_rewards(batch, anses):
    A_in_C = np.equal(np.array(batch[0]), anses[:, None, None])
    exist_rewards = np.any(A_in_C, axis=(1, 2))
    exist_rewards = np.logical_and(exist_rewards, np.not_equal(anses, 0))
    return exist_rewards
Example #34
def compare_dataframes(base,
                       compare,
                       return_orphans=True,
                       ignore_case=True,
                       print_info=False,
                       convert_np_timestamps=True):
    """
    Compare all common index and common column DataFrame values and
    report if any value is not equal in a returned dataframe.

    Values are compared only by index and column label, not order.
    Therefore, the only values compared are within common index rows
    and common columns.  The routine will report the common columns and
    any unique index rows when the print_info option is selected (True).

    Inputs are pandas dataframes and/or pandas series.

    This function works well when comparing initial data lists, such as
    those which may be received from opposing parties.

    If return_orphans, returns tuple (diffs, base_loners, compare_loners),
    else returns diffs.
    diffs is a differential dataframe.

    inputs
        base
            baseline dataframe or series
        compare
            dataframe or series to compare against the baseline (base)
        return_orphans
            separately calculate and return the rows which are unique to
            base and compare
        ignore_case
            convert the column labels and column data to be compared to
            lowercase - this will avoid differences detected based on string
            case
        print_info
            option to print out to console verbose statistical information
            and the dataframe(s) instead of returning dataframe(s)
        convert_np_timestamps
            numpy returns datetime64 objects when the source is a datetime
            date-only object.
            this option will convert back to a date-only object for comparison.

    """
    try:
        assert ((isinstance(base, pd.DataFrame)) |
                (isinstance(base, pd.Series))) and \
            ((isinstance(compare, pd.DataFrame)) |
             (isinstance(compare, pd.Series)))
    except AssertionError:
        print('Routine aborted. Inputs must be a pandas dataframe or series.')
        return

    if isinstance(base, pd.Series):
        base = pd.DataFrame(base)
    if isinstance(compare, pd.Series):
        compare = pd.DataFrame(compare)

    common_rows = list(base.index[base.index.isin(compare.index)])

    if print_info:
        print('\nROW AND INDEX INFORMATION:\n')
        print('base length:', len(base))
        print('comp length:', len(compare))
        print('common index count:', len(common_rows), '\n')

    # orphans section---------------------------------------------------------
    if return_orphans:
        base_orphans = list(base.index[~base.index.isin(compare.index)])
        compare_orphans = list(compare.index[~compare.index.isin(base.index)])
        base_col_name = 'base_orphans'
        compare_col_name = 'compare_orphans'

        base_loners = pd.DataFrame(base_orphans, columns=[base_col_name])
        compare_loners = pd.DataFrame(compare_orphans,
                                      columns=[compare_col_name])

        def find_label_locs(df, orphans):

            loc_list = []
            for orphan in orphans:
                loc_list.append(df.index.get_loc(orphan))
            return loc_list

        if base_orphans:
            base_loners['index_loc'] = find_label_locs(base, base_orphans)
            if print_info:
                print('BASE LONERS (rows, by index):')
                print(base_loners, '\n')
        else:
            if print_info:
                print('''There are no unique index rows in the base input vs.
                      the compare input.\n''')

        if compare_orphans:
            compare_loners['index_loc'] = find_label_locs(
                compare, compare_orphans)
            if print_info:
                print('COMPARE LONERS (rows, by index):')
                print(compare_loners, '\n')
        else:
            if print_info:
                print('''There are no unique index rows in the compare input
                      vs. the base input.\n''')
    # -----------------------------------------------------------------------

    base = base.loc[common_rows].copy()
    compare = compare.loc[common_rows].copy()

    unequal_cols = []
    equal_cols = []

    if ignore_case:
        base.columns = map(str.lower, base.columns)
        compare.columns = map(str.lower, compare.columns)

    common_cols = list(base.columns[base.columns.isin(compare.columns)])
    base_only_cols = list(base.columns[~base.columns.isin(compare.columns)])
    comp_only_cols = list(compare.columns[~compare.columns.isin(base.columns)])

    oddballs = base_only_cols.copy()
    oddballs.extend(comp_only_cols)

    all_columns = common_cols.copy()
    all_columns.extend(oddballs)

    if print_info:
        same_col_list = []
        print('\nCOMMON COLUMN equivalency:\n')
    for col in common_cols:
        if ignore_case:
            try:
                base[col] = base[col].str.lower()
                compare[col] = compare[col].str.lower()
            except AttributeError:
                # non-string column; leave the values unchanged
                pass
        same_col = base[col].sort_index().equals(compare[col].sort_index())
        if print_info:
            same_col_list.append(same_col)
        if not same_col:
            unequal_cols.append(col)
        else:
            equal_cols.append(col)

    base = base[unequal_cols]
    compare = compare[unequal_cols]

    if print_info:
        same_col_df = pd.DataFrame(list(zip(common_cols, same_col_list)),
                                   columns=['common_col', 'equivalent?'])
        same_col_df.sort_values(['equivalent?', 'common_col'], inplace=True)
        same_col_df.reset_index(drop=True, inplace=True)
        print(same_col_df, '\n')
        print('\nCOLUMN INFORMATION:')
        print('\ncommon columns:\n', common_cols)
        print('\ncommon and equal columns:\n', equal_cols)
        print('\ncommon but unequal columns:\n', unequal_cols)
        print('\ncols only in base:\n', base_only_cols)
        print('\ncols only in compare:\n', comp_only_cols, '\n')

        col_df = pd.DataFrame(index=[all_columns])
        column_names = [
            'equal_cols', 'unequal_cols', 'common_cols', 'base_only_cols',
            'comp_only_cols', 'all_columns'
        ]
        for result_name in column_names:
            i = 0
            # initialise to empty strings so unmatched slots stay blank
            col_arr = np.full(len(all_columns), '', dtype=object)
            for name in all_columns:
                if name in eval(result_name):
                    col_arr[i] = name
                i += 1
            col_df[result_name] = col_arr
        col_df.sort_values(['unequal_cols', 'equal_cols'], inplace=True)
        col_df.reset_index(drop=True, inplace=True)
        col_df.rename(columns={
            'unequal_cols': 'not_equal',
            'base_only_cols': 'base_only',
            'comp_only_cols': 'comp_only'
        },
                      inplace=True)
        print('\nCATEGORIZED COLUMN DATAFRAME:\n')
        print(col_df, '\n')

    zipped = []
    col_counts = []
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=FutureWarning)
        for col in base:
            base_np = base[col].values
            compare_np = compare[col].values

            try:
                unequal = np.not_equal(base_np, compare_np)
            except Exception:
                # comparison failed (typically because of duplicate index
                # values); report the duplicates and skip this column
                try:
                    mask = base.duplicated(subset=col, keep=False)
                    dups = list(base[mask][col])
                    print('error, duplicate values:')
                    print(pd.DataFrame(dups, columns=['dups']))
                except Exception:
                    pass
                col_counts.append(0)
                continue

            row_ = np.where(unequal)[0]
            index_ = base.iloc[row_].index
            col_ = np.array([col] * row_.size)
            base_ = base_np[unequal]
            compare_ = compare_np[unequal]
            if (base[col]).dtype == 'datetime64[ns]' and convert_np_timestamps:
                try:
                    base_ = base_.astype('M8[D]')
                    compare_ = compare_.astype('M8[D]')
                except:
                    pass
            zipped.extend(list(zip(row_, index_, col_, base_, compare_)))
            col_counts.append(row_.size)

    diffs = pd.DataFrame(zipped,
                         columns=['row', 'index', 'column', 'base', 'compare'])
    diffs.sort_values('row', inplace=True)
    diffs.reset_index(drop=True, inplace=True)

    if print_info:
        print('\nDIFFERENTIAL DATAFRAME:\n')
        print(diffs)
        print('\nSUMMARY:\n')
        print('{} total differences found in common rows and '
              'columns\n'.format(len(zipped)))

        if len(zipped) == 0:
            print('Comparison complete, dataframes are equivalent.\n'
                  'Index and column order may be different.\n')
        else:
            print(
                'Breakdown by column:\n',
                pd.DataFrame(list(zip(base.columns, col_counts)),
                             columns=['column', 'diff_count']), '\n')

    else:
        if return_orphans:
            return diffs, base_loners, compare_loners
        else:
            return diffs
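
A minimal stand-alone sketch of the core comparison performed above on the
common rows and columns (hypothetical toy data; only pandas and numpy are
assumed):

import numpy as np
import pandas as pd

base = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
compare = pd.DataFrame({'a': [1, 5, 3], 'b': ['x', 'y', 'q']})

common_rows = base.index[base.index.isin(compare.index)]
common_cols = base.columns[base.columns.isin(compare.columns)]
for col in common_cols:
    unequal = np.not_equal(base.loc[common_rows, col].values,
                           compare.loc[common_rows, col].values)
    # index labels of the rows where this column differs
    print(col, list(base.loc[common_rows].index[unequal]))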
示例#35
0
    def apply_perturbation(self,
                           perturbed_nodes,
                           params,
                           weights,
                           parallel,
                           kind='element'):
        """

        Perturbation simulator: applies the perturbation to all the nodes
        affected by it.
        The optimizer is run if any switch is present, and the edges from a
        switch's predecessors are removed if its state is set to 'False'.

        :param list perturbed_nodes: node(s) involved in the
            perturbing event.
        :param dict params: values for the optimizer evolutionary algorithm.
            Dict of: {str: int, str: int, str: float, str: float, str: int}:
            - 'npop': number of individuals for each population (default to 300)
            - 'ngen': total number of generations (default to 100)
            - 'indpb': independent probability for attributes to be changed
            (default to 0.6)
            - 'tresh': threshold for applying crossover/mutation
            (default to 0.5)
            - 'nsel': number of individuals to select (default to 5)
        :param dict weights: weights for fitness evaluation on individuals.
            Dict of: {str: float, str: float, str: float}:
            - 'w1': weight multiplying number of actions (default to 1.0)
            - 'w2': weight multiplying total final service (default to 1.0)
            - 'w3': weight multiplying final graph size (default to 1.0)
        :param bool parallel: flag for parallel fitness evaluation of
            initial population
        :param str kind: type of simulation, used to label output files,
            default to 'element'

        .. note:: A perturbation, depending on the considered system,
            may spread in all directions starting from the damaged
            component(s) and may affect nearby areas.
        """

        if self.G.switches:
            res = np.array(
                self.optimizer(perturbed_nodes, self.G.init_status, params,
                               weights, parallel))
            best = dict(
                zip(self.G.init_status.keys(), res[np.argmin(res[:, 1]), 0]))

            initial_condition_sw = list(self.G.init_status.values())
            final_condition_sw = list(best.values())
            flips = dict(
                zip(self.G.init_status.keys(),
                    np.not_equal(initial_condition_sw, final_condition_sw)))

        init_open_edges = {}
        for sw, closed in self.G.init_status.items():
            if not closed:
                logging.debug(f'Opened switch {sw} in initial configuration')
                for pred in list(self.G.predecessors(sw)):
                    # if the final configuration closes this switch again,
                    # remember the removed edge so it can be restored
                    if flips[sw]:
                        init_open_edges[sw] = {pred: self.G[pred][sw]}
                    self.G.remove_edge(pred, sw)

        self.check_paths_and_measures(prefix='original_')

        self.G.clear_data([
            'shortest_path', 'shortest_path_length', 'efficiency',
            'nodal_efficiency', 'local_efficiency', 'computed_service',
            'closeness_centrality', 'betweenness_centrality',
            'indegree_centrality', 'outdegree_centrality', 'degree_centrality'
        ])

        if self.G.switches:
            for sw, closed in best.items():
                if flips[sw]:

                    if not closed:
                        logging.debug(
                            f'Switch {sw} finally open, first closed')
                        for pre in list(self.G.predecessors(sw)):
                            self.G.remove_edge(pre, sw)

                    else:
                        logging.debug(
                            f'Switch {sw} finally closed, first open')
                        for pre, attrs in init_open_edges[sw].items():
                            self.G.add_edge(pre, sw, **attrs)

            logging.debug(f'BEST: {best}, with fitness: {np.min(res[:, 1])}')
            self.G.final_status = best

        for node in perturbed_nodes:
            if node in self.G.nodes():
                self.delete_a_node(node)

        self.check_paths_and_measures(prefix='final_')
        self.paths_df.to_csv('service_paths_' + str(kind) +
                             '_perturbation.csv',
                             index=False)

        status_area_fields = ['final_status', 'mark_status', 'status_area']
        self.update_output(status_area_fields)

        self.update_status_areas(self.damaged_areas)
        self.graph_characterization_to_file(str(kind) + '_perturbation.csv')
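
A hedged sketch of how the params and weights dictionaries documented above
might be assembled, using the stated defaults (the simulator instance and the
node id are hypothetical):

params = {'npop': 300, 'ngen': 100, 'indpb': 0.6, 'tresh': 0.5, 'nsel': 5}
weights = {'w1': 1.0, 'w2': 1.0, 'w3': 1.0}
# simulator.apply_perturbation(['damaged_node'], params, weights,
#                              parallel=False, kind='element')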
示例#36
0
# coding: utf-8
#from toy_problems import Data

import numpy as np
from sklearn.semi_supervised import LabelPropagation, LabelSpreading
from new_stability import *
GAMMA = 3e-6

original = np.load("datasets/spines2.npz")
shapes = np.sum(original["shapes_n"], axis=2)
## at least one classifier chose a label for the spine
both_label_vector = np.logical_and(np.not_equal(original["kl1"], None),
                                   np.not_equal(original["kl2"], None))
## both classifiers agreed on the spine
both_match_vector = np.logical_and(both_label_vector,
                                   original["kl1"] == original["kl2"])
##both stubby, long & thin or mushroom
longs = original["kl1"] == "Long & thin"
mushrooms = original["kl1"] == "Mushroom"
stubbies = original["kl1"] == "Stubby"
## differences in the populations of the individual spine types
#print sum(longs), sum(mushrooms), sum(stubbies)
both_match_long_stubby_mush = lsm = both_match_vector & (longs | mushrooms
                                                         | stubbies)


def getIndicesOfTrue(vector):
    return np.array([i for i, x in enumerate(vector) if x])


def getLearningAndThrowAwayInd(len_learn, len_throw_away, learnable_obs=lsm):
def read_snap_dataset_as_list(dir, name):
    list_dir = os.listdir(dir)
    if len(list_dir) == 1 and osp.isdir(osp.join(dir, list_dir[0])):
        dir = osp.join(dir, list_dir[0])

    if 'ego-' in name:
        files = [
            file for file in os.listdir(dir) if osp.isfile(osp.join(dir, file))
        ]
        files.sort()

        data_list = []
        for i in range(5, len(files), 5):
            circles_file = files[i]
            edges_file = files[i + 1]
            egofeat_file = files[i + 2]
            feat_file = files[i + 3]
            # featnames_file = files[i+4]

            x = torch.from_numpy(np.loadtxt(osp.join(dir, feat_file))).long()
            indices = x[:, 0]
            indices_assoc = to_assoc(indices)
            x = x[:, 1:]

            circles = []
            f = open(osp.join(dir, circles_file), "r")
            c_line = f.readline()
            while not c_line == '':
                circles.append([
                    from_assoc(indices_assoc, int(i))
                    for i in c_line.split()[1:]
                ])
                c_line = f.readline()
            f.close()

            edge_index = np.loadtxt(osp.join(dir, edges_file))
            edge_index = torch.from_numpy(edge_index).transpose(0, 1).long()

            # TODO find more efficient way to do this
            for i in range(edge_index.shape[0]):
                for j in range(edge_index.shape[1]):
                    edge_index[i][j] = from_assoc(indices_assoc,
                                                  edge_index[i][j].item())

            x_ego = np.loadtxt(osp.join(dir, egofeat_file))
            x_ego = torch.from_numpy(x_ego).long()
            x = torch.cat((x, x_ego.unsqueeze(0)))

            # the ego node is connected to every other node
            edge_index_ego = torch.fill_(torch.zeros((2, x.shape[0] - 1)),
                                         x.shape[0] - 1)
            edge_index_ego[0] = torch.arange(x.shape[0] - 1)

            # edges are undirected in ego-Facebook, so also add the reverse ego edges
            if name == 'ego-Facebook':
                edge_index_ego2 = torch.fill_(torch.zeros((2, x.shape[0] - 1)),
                                              x.shape[0] - 1)
                edge_index_ego2[1] = torch.arange(x.shape[0] - 1)
                edge_index = torch.cat((edge_index, edge_index_ego.long(),
                                        edge_index_ego2.long()),
                                       dim=1)

            else:
                edge_index = torch.cat((edge_index, edge_index_ego.long()),
                                       dim=1)

            data = Data(x=x, edge_index=edge_index, circles=circles)
            data_list.append(data)

        return data_list

    elif 'soc-' in name:
        if name == 'soc-Pokec':
            # TODO? read out features from 'soc-pokec-profiles.txt'
            edge_index = np.loadtxt(
                osp.join(dir, 'soc-pokec-relationships.txt'))
            edge_index = torch.from_numpy(edge_index).transpose(0, 1).long()
            data = Data(edge_index=edge_index)
            return [data]
        else:
            list_dir = os.listdir(dir)
            if len(list_dir) == 1 and osp.isfile(osp.join(dir, list_dir[0])):
                edge_index = np.loadtxt(osp.join(dir, list_dir[0]))
                ids = np.unique(edge_index)
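                # remap arbitrary node ids to consecutive ids 0..len(ids)-1,
                # e.g. original ids [3, 7, 9] become [0, 1, 2]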
                for i, j in zip(ids, range(len(ids))):
                    edge_index[edge_index == i] = j
                assert np.sum(
                    np.not_equal(np.unique(edge_index),
                                 np.arange(len(ids)))) == 0
                edge_index = torch.from_numpy(edge_index).transpose(0, 1)\
                                                         .long()
                data = Data(edge_index=edge_index)
                return [data]

    elif 'wiki-' in name:
        if name == 'wiki-Vote':
            list_dir = os.listdir(dir)
            if len(list_dir) == 1 and osp.isfile(osp.join(dir, list_dir[0])):
                edge_index = np.loadtxt(osp.join(dir, list_dir[0]))
                ids = np.unique(edge_index)
                for i, j in zip(ids, range(len(ids))):
                    edge_index[edge_index == i] = j
                assert np.sum(
                    np.not_equal(np.unique(edge_index),
                                 np.arange(len(ids)))) == 0
                edge_index = torch.from_numpy(edge_index).transpose(0, 1)\
                                                         .long()
                data = Data(edge_index=edge_index)
                return [data]

        elif name == 'wiki-RfA':
            list_dir = os.listdir(dir)
            if len(list_dir) == 1 and osp.isfile(osp.join(dir, list_dir[0])):
                i = 0
                with open(osp.join(dir, list_dir[0])) as f:
                    line = f.readline()
                    while not line == '':
                        print(i, line)
                        if i == 10:
                            raise
                        i += 1
                        line = f.readline()
KDTree_Y = np.matrix(Result).astype(int).transpose()
KDTree_Y_class0 = np.matrix(Result[1] <= 5000).astype(int).transpose()
KDTree_Y_class1 = np.matrix(Result[1] > 5000).astype(int).transpose()
accuracy = (KDTree_Y == test_Y).mean()
error = (KDTree_Y != test_Y).mean()
print('Accuracy_KDT', accuracy)
print('Error_KDT', error)
Correct_class0_kdt = np.where(np.equal(test_Y, KDTree_Y_class0))[0].tolist()
Elements_Correctclass0_kdt = training_X[Correct_class0_kdt]
Elements_Correctclass0_kdt_x = Elements_Correctclass0_kdt[:, 0]
Elements_Correctclass0_kdt_y = Elements_Correctclass0_kdt[:, 1]
Correct_class1_kdt = np.where(np.equal(test_Y, KDTree_Y_class1))[0].tolist()
Elements_Correctclass1_kdt = training_X[Correct_class1_kdt]
Elements_Correctclass1_kdt_x = Elements_Correctclass1_kdt[:, 0]
Elements_Correctclass1_kdt_y = Elements_Correctclass1_kdt[:, 1]
Incorrect_class0_kdt = np.where(np.not_equal(test_Y,
                                             KDTree_Y_class0))[0].tolist()
Elements_Incorrectclass0_kdt = training_X[Incorrect_class0_kdt]
Elements_Incorrectclass0_kdt_x = Elements_Incorrectclass0_kdt[:, 0]
Elements_Incorrectclass0_kdt_y = Elements_Incorrectclass0_kdt[:, 1]
Incorrect_class1_kdt = np.where(np.not_equal(test_Y,
                                             KDTree_Y_class1))[0].tolist()
Elements_Incorrectclass1_kdt = training_X[Incorrect_class1_kdt]
Elements_Incorrectclass1_kdt_x = Elements_Incorrectclass1_kdt[:, 0]
Elements_Incorrectclass1_kdt_y = Elements_Incorrectclass1_kdt[:, 1]
mplot.plot(Training_class0_x, Training_class0_y, 'x', color='b')
mplot.plot(Training_class1_x, Training_class1_y, 'x', color='m')
mplot.plot(Elements_Correctclass0_kdt_x,
           Elements_Correctclass0_kdt_y,
           'x',
           color='r')
mplot.plot(Elements_Correctclass1_kdt_x,
示例#39
0
    def test_compress_video_call(self):
        test_input = np.arange(12).reshape(1, 3, 1, 2, 2)
        video_compression = VideoCompression(video_format="mp4", constant_rate_factor=50, channels_first=True)

        assert np.any(np.not_equal(video_compression(test_input)[0], test_input))
示例#40
0
def divz(X,Y):
        return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
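
A quick check of divz on hypothetical input: wherever Y is zero the result is
forced to zero instead of raising a division error; elsewhere it is plain X/Y:

import numpy as np
X = np.array([1.0, 2.0, 3.0])
Y = np.array([2.0, 0.0, 4.0])
print(divz(X, Y))  # [0.5  0.   0.75]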
示例#41
0
    def fit(self):
        """
        Trains a model
        """
        if self.optimizers is None:
            self.setup_optimizers()

        num_train = self.dataset.train_X.shape[0]

        loss_history = []
        train_acc_history = []
        val_acc_history = []

        for epoch in range(self.num_epochs):
            shuffled_indices = np.arange(num_train)
            np.random.shuffle(shuffled_indices)
            sections = np.arange(self.batch_size, num_train, self.batch_size)
            batches_indices = np.array_split(shuffled_indices, sections)

            batch_losses = []

            for batch_indices in batches_indices:
                # TODO Generate batches based on batch_indices and
                # use model to generate loss and gradients for all
                # the params
                # use self.model as the model!!!
                #raise Exception("Not implemented!")

                loss = self.model.compute_loss_and_gradients(
                    self.dataset.train_X[batch_indices],
                    self.dataset.train_y[batch_indices])
                # forward and backward passes done, gradients computed

                for param_name, param in self.model.params().items():
                    optimizer = self.optimizers[param_name]
                    param.value = optimizer.update(param.value, param.grad,
                                                   self.learning_rate)

                batch_losses.append(loss)

            if np.not_equal(self.learning_rate_decay, 1.0):
                # apply multiplicative learning rate decay
                self.learning_rate *= self.learning_rate_decay

            ave_loss = np.mean(batch_losses)

            train_accuracy = self.compute_accuracy(self.dataset.train_X,
                                                   self.dataset.train_y)

            val_accuracy = self.compute_accuracy(self.dataset.val_X,
                                                 self.dataset.val_y)

            print("Loss: %f, Train accuracy: %f, val accuracy: %f" %
                  (batch_losses[-1], train_accuracy, val_accuracy))

            loss_history.append(ave_loss)
            train_acc_history.append(train_accuracy)
            val_acc_history.append(val_accuracy)

        return loss_history, train_acc_history, val_acc_history
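
A small illustration of the batch-splitting step used in the training loop
above, with hypothetical sizes:

import numpy as np
num_train, batch_size = 10, 4
shuffled = np.arange(num_train)  # stand-in for the shuffled indices
sections = np.arange(batch_size, num_train, batch_size)  # array([4, 8])
print(np.array_split(shuffled, sections))
# [array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([8, 9])]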
示例#42
0
def test_broadcast():
    a = sym.Variable("a")
    b = sym.Variable("b")
    shape = {'a': (3, 4, 5), 'b': (1, 5)}

    def _collapse(g):
        return g.reshape(-1, shape['b'][-1]).sum(0, keepdims=True)

    y = sym.broadcast_add(a, b)
    def _backward_add(head_grads, a, b):
        da = head_grads
        db = _collapse(head_grads)
        return da, db
    check_function(y, lambda a, b: a + b, _backward_add, shape=shape)

    y = sym.broadcast_sub(a, b)
    def _backward_sub(head_grads, a, b):
        da = head_grads
        db = -_collapse(head_grads)
        return da, db
    check_function(y, lambda a, b: a - b, _backward_sub, shape=shape)

    y = sym.broadcast_mul(a, b)
    def _backward_mul(head_grads, a, b):
        da = head_grads * b
        db = _collapse(head_grads * a)
        return da, db
    check_function(y, lambda a, b: a * b, _backward_mul, shape=shape)

    y = sym.broadcast_div(a, b)
    def _backward_div(head_grads, a, b):
        da = head_grads / b
        db = _collapse(- head_grads * a / b**2)
        return da, db
    # We avoid computing numerical derivatives too close to zero here
    check_function(y, lambda a, b: a / b, _backward_div, shape=shape, numerical_grads=False)
    check_function(y, lambda a, b: a / b, _backward_div, shape=shape,
                   in_range={'b': (0.1, 20)})

    y = sym.broadcast_mod(a, b)
    check_function(y,
                   lambda a, b: np.mod(a, b),
                   in_range={'a': (0.001, 100), 'b': (1, 100)}, dtype='int32', shape=shape)

    y = sym.broadcast_max(a, b)
    check_function(y, lambda a, b: np.maximum(a, b), shape=shape)

    y = sym.broadcast_min(a, b)
    check_function(y, lambda a, b: np.minimum(a, b), shape=shape)

    y = sym.broadcast_pow(a, b)
    check_function(y,
                   lambda a, b: np.power(a, b),
                   in_range={'a': (0.001, 100), 'b': (0.001, 2)}, shape=shape)

    y = sym.broadcast_left_shift(a, b)
    check_function(y, lambda a, b: a << b, dtype='int32', shape=shape)

    y = sym.broadcast_right_shift(a, b)
    check_function(y, lambda a, b: a >> b, dtype='int32', shape=shape)

    y = sym.broadcast_greater(a, b)
    check_function(y, lambda a, b: np.greater(a, b), shape=shape)

    y = sym.broadcast_less(a, b)
    check_function(y, lambda a, b: np.less(a, b), shape=shape)

    y = sym.broadcast_equal(a, b)
    check_function(y, lambda a, b: np.equal(a, b),
                   in_range={'a': (-2, 2), 'b': (-2, 2)}, dtype='int32', shape=shape)

    y = sym.broadcast_not_equal(a, b)
    check_function(y, lambda a, b: np.not_equal(a, b),
                   in_range={'a': (-2, 2), 'b': (-2, 2)}, dtype='int32', shape=shape)

    y = sym.broadcast_greater_equal(a, b)
    check_function(y, lambda a, b: np.greater_equal(a, b),
                   in_range={'a': (-3, 3), 'b': (-3, 3)}, dtype='int32', shape=shape)

    y = sym.broadcast_less_equal(a, b)
    check_function(y, lambda a, b: np.less_equal(a, b),
                   in_range={'a': (-3, 3), 'b': (-3, 3)}, dtype='int32', shape=shape)
示例#43
0
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b),
                     [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b),
                     ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
示例#44
0
                verbose_ops_dict={
                    'data': data_ph,
                    'labels': label_ph,
                    'mu': model.z_mu,
                    'sigma': model.z_sigma,
                    'codes': model.latent_posterior_sample
                },
                feed_dict_fn=test_feed_dict_fn)
            data_vals = np.stack(eval_dict['data'])
            label_vals = np.argmax(np.stack(eval_dict['labels']), axis=1)
            mu_vals = np.stack(eval_dict['mu'])
            sigma_vals = np.stack(eval_dict['sigma'])
            code_vals = np.concatenate(eval_dict['codes'], axis=0)
            prediction_vals = model._decision_tree.predict(code_vals)

            mislabeled_mask = np.not_equal(label_vals, prediction_vals)
            mislabeled_idxs = np.arange(len(data_vals))[mislabeled_mask]
            mislabeled_data = data_vals[mislabeled_mask]
            mislabeled_label = label_vals[mislabeled_mask]
            mislabeled_mu = mu_vals[mislabeled_mask]
            mislabeled_sigma = sigma_vals[mislabeled_mask]
            mislabeled_code = code_vals[mislabeled_mask]
            mislabeled_prediction = prediction_vals[mislabeled_mask]

            c_means, c_sds = model.aggregate_posterior_parameters(
                session, label_ph, train_batches_per_epoch, train_feed_dict_fn)

            latent_code_ph = tf.placeholder(tf.float32)
            for id, instance_mu, instance_sigma, label, prediction in zip(
                    mislabeled_idxs, mislabeled_mu, mislabeled_sigma,
                    mislabeled_label, mislabeled_prediction):
示例#45
0
def assign_instances_for_scan(scene_name, pred_info, gt_file):
    try:
        gt_ids = util_3d.load_ids(gt_file)
    except Exception as e:
        util.print_error('unable to load ' + gt_file + ': ' + str(e))

    # get gt instances
    gt_instances = util_3d.get_instances(gt_ids, VALID_CLASS_IDS, CLASS_LABELS,
                                         ID_TO_LABEL)

    # associate
    gt2pred = gt_instances.copy()
    for label in gt2pred:
        for gt in gt2pred[label]:
            gt['matched_pred'] = []
    pred2gt = {}
    for label in CLASS_LABELS:
        pred2gt[label] = []
    num_pred_instances = 0
    # mask of void labels in the groundtruth
    bool_void = np.logical_not(np.in1d(gt_ids // 1000, VALID_CLASS_IDS))
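    # (this assumes the common encoding gt_id = class_id * 1000 + instance
    #  index, e.g. gt id 36005 -> class 36, instance 5, hence the // 1000)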
    # go thru all prediction masks
    nMask = pred_info['label_id'].shape[0]
    for i in range(nMask):
        label_id = int(pred_info['label_id'][i])
        conf = pred_info['conf'][i]
        if label_id not in ID_TO_LABEL:
            continue
        label_name = ID_TO_LABEL[label_id]
        # read the mask
        pred_mask = pred_info['mask'][i]  # (N), long
        if len(pred_mask) != len(gt_ids):
            util.print_error('wrong number of lines in mask#%d: ' % (i) +
                             '(%d) vs #mesh vertices (%d)' %
                             (len(pred_mask), len(gt_ids)))
        # convert to binary
        pred_mask = np.not_equal(pred_mask, 0)
        num = np.count_nonzero(pred_mask)
        if num < MIN_REGION_SIZES[0]:
            continue  # skip if empty

        pred_instance = {}
        pred_instance['filename'] = '{}_{:03d}'.format(scene_name,
                                                       num_pred_instances)
        pred_instance['pred_id'] = num_pred_instances
        pred_instance['label_id'] = label_id
        pred_instance['vert_count'] = num
        pred_instance['confidence'] = conf
        pred_instance['void_intersection'] = np.count_nonzero(
            np.logical_and(bool_void, pred_mask))

        # matched gt instances
        matched_gt = []
        # go thru all gt instances with matching label
        for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
            intersection = np.count_nonzero(
                np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
            if intersection > 0:
                gt_copy = gt_inst.copy()
                pred_copy = pred_instance.copy()
                gt_copy['intersection'] = intersection
                pred_copy['intersection'] = intersection
                matched_gt.append(gt_copy)
                gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
        pred_instance['matched_gt'] = matched_gt
        num_pred_instances += 1
        pred2gt[label_name].append(pred_instance)

    return gt2pred, pred2gt
    def erode(self, dt, flooded_depths=None, **kwds):
        """Erode and deposit on the channel bed for a duration of *dt*.

        Erosion occurs according to the sediment dependent rules specified
        during initialization.

        Parameters
        ----------
        dt : float (years, only!)
            Timestep for which to run the component.
        flooded_depths : array or field name (m)
            Depths of flooding at each node, zero where no lake. Note that the
            component will dynamically update this array as it fills nodes
            with sediment (...but does NOT update any other related lake
            fields).
        """
        grid = self.grid
        node_z = grid.at_node['topographic__elevation']
        node_A = grid.at_node['drainage_area']
        flow_receiver = grid.at_node['flow__receiver_node']
        s_in = grid.at_node['flow__upstream_node_order']
        node_S = grid.at_node['topographic__steepest_slope']

        if type(flooded_depths) is str:
            flooded_depths = grid.at_node[flooded_depths]
            # also need a map of initial flooded conds:
            flooded_nodes = flooded_depths > 0.
        elif type(flooded_depths) is np.ndarray:
            assert flooded_depths.size == self.grid.number_of_nodes
            flooded_nodes = flooded_depths > 0.
            # need an *updateable* record of the pit depths
        else:
            # if None, handle in loop
            flooded_nodes = None
        steepest_link = 'flow__link_to_receiver_node'
        link_length = np.empty(grid.number_of_nodes, dtype=float)
        link_length.fill(np.nan)
        draining_nodes = np.not_equal(grid.at_node[steepest_link],
                                      BAD_INDEX_VALUE)
        core_draining_nodes = np.intersect1d(np.where(draining_nodes)[0],
                                             grid.core_nodes,
                                             assume_unique=True)
        link_length[core_draining_nodes] = grid.length_of_d8[
            grid.at_node[steepest_link][core_draining_nodes]]

        if self.Qc == 'MPM':
            if self.Dchar_in is not None:
                self.Dchar = self.Dchar_in
            else:
                assert not self.set_threshold, (
                    "Something is seriously wrong with your model " +
                    "initialization.")
                assert self.override_threshold, (
                    "You need to confirm to the module you intend it to " +
                    "internally calculate a shear stress threshold, " +
                    "with set_threshold_from_Dchar in the input file.")
                # we need to adjust the thresholds for the Shields number
                # & gs dynamically:
                variable_thresh = self.shields_crit * self.g * (
                    self.sed_density - self.fluid_density) * self.Dchar
            if self.lamb_flag:
                variable_shields_crit = 0.15 * node_S**0.25
                try:
                    variable_thresh = (variable_shields_crit *
                                       self.shields_prefactor_to_shear)
                except AttributeError:
                    variable_thresh = (
                        variable_shields_crit *
                        self.shields_prefactor_to_shear_noDchar * self.Dchar)

            node_Q = self.k_Q * self.runoff_rate * node_A**self._c
            shear_stress_prefactor_timesAparts = (self.shear_stress_prefactor *
                                                  node_Q**self.point6onelessb)
            try:
                transport_capacities_thresh = (
                    self.thresh * self.Qs_thresh_prefactor *
                    self.runoff_rate**(0.66667 * self._b) *
                    node_A**self.Qs_power_onAthresh)
            except AttributeError:
                transport_capacities_thresh = (
                    variable_thresh * self.Qs_thresh_prefactor *
                    self.runoff_rate**(0.66667 * self._b) *
                    node_A**self.Qs_power_onAthresh)

            transport_capacity_prefactor_withA = (
                self.Qs_prefactor * self.runoff_rate**(0.6 + self._b / 15.) *
                node_A**self.Qs_power_onA)

            internal_t = 0.
            break_flag = False
            dt_secs = dt * 31557600.
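            # (31,557,600 s = 365.25 days * 86,400 s, i.e. one Julian year)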
            counter = 0
            rel_sed_flux = np.empty_like(node_Q)
            # excess_vol_overhead = 0.

            while 1:
                # ^use the break flag, to improve computational efficiency for
                # runs which are very stable
                # we assume the drainage structure is forbidden to change
                # during the whole dt
                # note slopes will be *negative* at pits
                # track how many loops we perform:
                counter += 1
                downward_slopes = node_S.clip(0.)
                # this removes the tendency to transfer material against
                # gradient, including in any lake depressions
                # we DON'T immediately zero trp capacity in the lake.
                # positive_slopes = np.greater(downward_slopes, 0.)
                slopes_tothe07 = downward_slopes**0.7
                transport_capacities_S = (transport_capacity_prefactor_withA *
                                          slopes_tothe07)
                trp_diff = (transport_capacities_S -
                            transport_capacities_thresh).clip(0.)
                transport_capacities = np.sqrt(trp_diff * trp_diff * trp_diff)
                shear_stress = (shear_stress_prefactor_timesAparts *
                                slopes_tothe07)
                shear_tothe_a = shear_stress**self._a

                dt_this_step = dt_secs - internal_t
                # ^timestep adjustment is made AFTER the dz calc
                node_vol_capacities = transport_capacities * dt_this_step

                sed_into_node = np.zeros(grid.number_of_nodes, dtype=float)
                dz = np.zeros(grid.number_of_nodes, dtype=float)
                len_s_in = s_in.size
                cell_areas = self.cell_areas
                try:
                    raise NameError
                    # ^tripped out deliberately for now; doesn't appear to
                    # accelerate much
                    weave.inline(self.routing_code, [
                        'len_s_in', 'sed_into_node', 'transport_capacities',
                        'dz', 'cell_areas', 'dt_this_step',
                        'flow__receiver_node'
                    ])
                except NameError:
                    for i in s_in[::-1]:  # work downstream
                        cell_area = cell_areas[i]
                        if flooded_nodes is not None:
                            flood_depth = flooded_depths[i]
                        else:
                            flood_depth = 0.
                        sed_flux_into_this_node = sed_into_node[i]
                        node_capacity = transport_capacities[i]
                        # ^we work in volume flux, not volume per se here
                        node_vol_capacity = node_vol_capacities[i]
                        if flood_depth > 0.:
                            node_vol_capacity = 0.
                            # requires special case handling - as much sed as
                            # possible is dumped here, then the remainder
                            # passed on
                        if sed_flux_into_this_node < node_vol_capacity:
                            # ^note incision is forbidden at capacity
                            # flooded nodes never enter this branch
                            # #implementing the pseudoimplicit method:
                            try:
                                thresh = variable_thresh
                            except:  # it doesn't exist
                                thresh = self.thresh
                            dz_prefactor = self._K_unit_time * dt_this_step * (
                                shear_tothe_a[i] - thresh).clip(0.)
                            vol_prefactor = dz_prefactor * cell_area
                            (dz_here, sed_flux_out, rel_sed_flux_here,
                             error_in_sed_flux) = \
                                self.get_sed_flux_function_pseudoimplicit(
                                    sed_flux_into_this_node, node_vol_capacity,
                                    vol_prefactor, dz_prefactor)
                            # note now dz_here may never create more sed than
                            # the out can transport...
                            assert sed_flux_out <= node_vol_capacity, (
                                'failed at node ' + str(s_in.size - i) +
                                ' with rel sed flux ' +
                                str(sed_flux_out / node_capacity))
                            rel_sed_flux[i] = rel_sed_flux_here
                            vol_pass = sed_flux_out
                        else:
                            rel_sed_flux[i] = 1.
                            vol_dropped = (sed_flux_into_this_node -
                                           node_vol_capacity)
                            dz_here = -vol_dropped / cell_area
                            # with the pits, we aim to inhibit incision, but
                            # depo is OK. We have already zero'd any adverse
                            # grads, so sed can make it to the bottom of the
                            # pit but no further in a single step, which seems
                            # reasonable. Pit should fill.
                            if flood_depth <= 0.:
                                vol_pass = node_vol_capacity
                            else:
                                height_excess = -dz_here - flood_depth
                                # ...above water level
                                if height_excess <= 0.:
                                    vol_pass = 0.
                                    # dz_here is already correct
                                    flooded_depths[i] += dz_here
                                else:
                                    dz_here = -flood_depth
                                    vol_pass = height_excess * cell_area
                                    # ^bit cheeky?
                                    flooded_depths[i] = 0.
                                    # note we must update flooded depths
                                    # transiently to conserve mass
                            # do we need to retain a small downhill slope?
                            # ...don't think so. Will resolve itself on next
                            # timestep.

                        dz[i] -= dz_here
                        sed_into_node[flow_receiver[i]] += vol_pass

                break_flag = True

                node_z[grid.core_nodes] += dz[grid.core_nodes]

                if break_flag:
                    break
                # do we need to reroute the flow/recalc the slopes here?
                # -> NO, slope is such a minor component of Diff we'll be OK
                # BUT could be important not for the stability, but for the
                # actual calc. So YES.
                node_S = np.zeros_like(node_S)
                node_S[core_draining_nodes] = (
                    node_z - node_z[flow_receiver]
                )[core_draining_nodes] / link_length[core_draining_nodes]
                internal_t += dt_this_step  # still in seconds, remember

        elif self.Qc == 'power_law':
            transport_capacity_prefactor_withA = self._Kt * node_A**self._mt
            erosion_prefactor_withA = self._K_unit_time * node_A**self._m
            # ^doesn't include S**n*f(Qc/Qc)
            internal_t = 0.
            break_flag = False
            dt_secs = dt * 31557600.
            counter = 0
            rel_sed_flux = np.empty_like(node_A)
            while 1:
                counter += 1
                # print counter
                downward_slopes = node_S.clip(0.)
                # positive_slopes = np.greater(downward_slopes, 0.)
                slopes_tothen = downward_slopes**self._n
                slopes_tothent = downward_slopes**self._nt
                transport_capacities = (transport_capacity_prefactor_withA *
                                        slopes_tothent)
                erosion_prefactor_withS = (erosion_prefactor_withA *
                                           slopes_tothen)  # no time, no fqs
                # shear_tothe_a = shear_stress**self._a

                dt_this_step = dt_secs - internal_t
                # ^timestep adjustment is made AFTER the dz calc
                node_vol_capacities = transport_capacities * dt_this_step

                sed_into_node = np.zeros(grid.number_of_nodes, dtype=float)
                dz = np.zeros(grid.number_of_nodes, dtype=float)
                cell_areas = self.cell_areas
                for i in s_in[::-1]:  # work downstream
                    cell_area = cell_areas[i]
                    if flooded_nodes is not None:
                        flood_depth = flooded_depths[i]
                    else:
                        flood_depth = 0.
                    sed_flux_into_this_node = sed_into_node[i]
                    node_capacity = transport_capacities[i]
                    # ^we work in volume flux, not volume per se here
                    node_vol_capacity = node_vol_capacities[i]
                    if flood_depth > 0.:
                        node_vol_capacity = 0.
                    if sed_flux_into_this_node < node_vol_capacity:
                        # ^note incision is forbidden at capacity
                        dz_prefactor = dt_this_step * erosion_prefactor_withS[i]
                        vol_prefactor = dz_prefactor * cell_area
                        (dz_here, sed_flux_out, rel_sed_flux_here,
                         error_in_sed_flux) = \
                            self.get_sed_flux_function_pseudoimplicit(
                                sed_flux_into_this_node, node_vol_capacity,
                                vol_prefactor, dz_prefactor)
                        # note now dz_here may never create more sed than the
                        # out can transport...
                        assert sed_flux_out <= node_vol_capacity, (
                            'failed at node ' + str(s_in.size - i) +
                            ' with rel sed flux ' +
                            str(sed_flux_out / node_capacity))
                        rel_sed_flux[i] = rel_sed_flux_here
                        vol_pass = sed_flux_out
                    else:
                        rel_sed_flux[i] = 1.
                        vol_dropped = (sed_flux_into_this_node -
                                       node_vol_capacity)
                        dz_here = -vol_dropped / cell_area
                        try:
                            isflooded = flooded_nodes[i]
                        except TypeError:  # was None
                            isflooded = False
                        if flood_depth <= 0. and not isflooded:
                            vol_pass = node_vol_capacity
                            # we want flooded nodes which have already been
                            # filled to enter the else statement
                        else:
                            height_excess = -dz_here - flood_depth
                            # ...above water level
                            if height_excess <= 0.:
                                vol_pass = 0.
                                # dz_here is already correct
                                flooded_depths[i] += dz_here
                            else:
                                dz_here = -flood_depth
                                vol_pass = height_excess * cell_area
                                # ^bit cheeky?
                                flooded_depths[i] = 0.

                    dz[i] -= dz_here
                    sed_into_node[flow_receiver[i]] += vol_pass
                break_flag = True

                node_z[grid.core_nodes] += dz[grid.core_nodes]

                if break_flag:
                    break
                # do we need to reroute the flow/recalc the slopes here?
                # -> NO, slope is such a minor component of Diff we'll be OK
                # BUT could be important not for the stability, but for the
                # actual calc. So YES.
                node_S = np.zeros_like(node_S)
                # print link_length[core_draining_nodes]
                node_S[core_draining_nodes] = (
                    node_z - node_z[flow_receiver]
                )[core_draining_nodes] / link_length[core_draining_nodes]
                internal_t += dt_this_step  # still in seconds, remember

        active_nodes = grid.core_nodes

        if self.return_ch_props:
            # add the channel property field entries,
            # 'channel__width', 'channel__depth', and 'channel__discharge'
            W = self.k_w * node_Q**self._b
            H = shear_stress / self.rho_g / node_S  # ...sneaky!
            grid.at_node['channel__width'][:] = W
            grid.at_node['channel__depth'][:] = H
            grid.at_node['channel__discharge'][:] = node_Q
            grid.at_node['channel__bed_shear_stress'][:] = shear_stress

        grid.at_node[
            'channel_sediment__volumetric_transport_capacity'][:] = transport_capacities
        grid.at_node['channel_sediment__volumetric_flux'][:] = sed_into_node
        grid.at_node['channel_sediment__relative_flux'][:] = rel_sed_flux
        # elevs set automatically to the name used in the function call.
        self.iterations_in_dt = counter

        return grid, grid.at_node['topographic__elevation']
示例#47
0
def acr_subs_get_data(idxPrc,              # Process ID  #noqa
                      strSubId,            # Data struc - Subject ID
                      lstVtkDpth01,        # Data struc - Pth vtk I
                      varNumDpth,          # Data struc - Num. depth levels
                      strPrcdData,         # Data struc - Str prcd VTK data
                      varNumLne,           # Data struc - Lns prcd data VTK
                      lgcSlct01,           # Criterion 1 - Yes or no?
                      strCsvRoi,           # Criterion 1 - CSV path
                      varNumHdrRoi,        # Criterion 1 - Header lines
                      lgcSlct02,           # Criterion 2 - Yes or no?
                      strVtkSlct02,        # Criterion 2 - VTK path
                      varThrSlct02,        # Criterion 2 - Threshold
                      lgcSlct03,           # Criterion 3 - Yes or no?
                      strVtkSlct03,        # Criterion 3 - VTK path
                      varThrSlct03,        # Criterion 3 - Threshold
                      lgcSlct04,           # Criterion 4 - Yes or no?
                      strVtkSlct04,        # Criterion 4 - VTK path
                      tplThrSlct04,        # Criterion 4 - Threshold
                      lgcNormDiv,          # Normalisation - Yes or no?
                      varNormIdx,          # Normalisation - Reference
                      varDpi,              # Plot - Dots per inch
                      varYmin,             # Plot - Minimum of Y axis
                      varYmax,             # Plot - Maximum of Y axis
                      lstConLbl,           # Plot - Condition labels
                      strXlabel,           # Plot - X axis label
                      strYlabel,           # Plot - Y axis label
                      strTitle,            # Plot - Title
                      strPltOtPre,         # Plot - Output file path prefix
                      strPltOtSuf,         # Plot - Output file path suffix
                      strMetaCon,          # Metacondition (stim/periphery)
                      queOut):             # Queue for output list
    """
    Obtaining & plotting single subject data for across subject analysis.

    This function loads the data for each subject for a multi-subject analysis
    and passes the data to the parent function for visualisation.
    """
    # Only print status messages if this is the first of several parallel
    # processes:
    if idxPrc == 0:
        print('------Loading single subject data: ' + strSubId)

    # *************************************************************************
    # *** Import data

    # Import CSV file with ROI definition
    if lgcSlct01:
        if idxPrc == 0:
            print('---------Importing CSV file with ROI definition (first '
                  + 'criterion)')
        aryRoiVrtx = load_csv_roi(strCsvRoi, varNumHdrRoi)
    # Otherwise, create dummy vector (for function I/O)
    else:
        aryRoiVrtx = 0

    # Import second criterion vtk file (all depth levels)
    if lgcSlct02:
        if idxPrc == 0:
            print('---------Importing second criterion vtk file (all depth '
                  + 'levels).')
        arySlct02 = load_vtk_multi(strVtkSlct02,
                                   strPrcdData,
                                   varNumLne,
                                   varNumDpth)
    # Otherwise, create dummy vector (for function I/O)
    else:
        arySlct02 = 0

    # Import third criterion vtk file (all depth levels)
    if lgcSlct03:
        if idxPrc == 0:
            print('---------Importing third criterion vtk file (all depth '
                  + 'levels).')
        arySlct03 = load_vtk_multi(strVtkSlct03,
                                   strPrcdData,
                                   varNumLne,
                                   varNumDpth)
    # Otherwise, create dummy array (for function I/O)
    else:
        arySlct03 = 0

    # Import fourth criterion vtk file (one depth level)
    if lgcSlct04:
        if idxPrc == 0:
            print('---------Importing fourth criterion vtk file (one depth '
                  + 'level).')
        arySlct04 = load_vtk_multi(strVtkSlct04,
                                   strPrcdData,
                                   varNumLne,
                                   varNumDpth)
    # Otherwise, create dummy array (for function I/O):
    else:
        arySlct04 = 0

    # Import depth data vtk files
    if idxPrc == 0:
        print('---------Importing depth data vtk files.')
    # Number of input files (i.e. number of conditions):
    varNumCon = len(lstVtkDpth01)
    # List for input data:
    lstDpthData01 = [None] * varNumCon
    # Loop through input data files:
    for idxIn in range(0, varNumCon):
        # Import data from file:
        lstDpthData01[idxIn] = load_vtk_multi(lstVtkDpth01[idxIn],
                                              strPrcdData,
                                              varNumLne,
                                              varNumDpth)
        if idxPrc == 0:
            print('------------File ' + str(idxIn + 1) + ' out of '
                  + str(varNumCon))
    # *************************************************************************

    # *************************************************************************
    # *** Convert cope to percent signal change

    # According to the FSL documentation
    # (https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/UserGuide), the PEs can be
    # scaled to signal change with respect to the mean (over time within
    # voxel): "This is achieved by scaling the PE or COPE values by (100*) the
    # peak-peak height of the regressor (or effective regressor in the case of
    # COPEs) and then by dividing by mean_func (the mean over time of
    # filtered_func_data)." However, this PSC would be with respect to the
    # temporal mean, but we are interested in the PSC with respect to
    # pre-stimulus baseline. Thus, we extract the difference (a scaling
    # factor) between these two (i.e. temporal mean vs. pre-stimulus baseline)
    # from the respective FSL design matrix (`design.mat` in the FEAT
    # directory). The scaling factor is approximately 1.4 (slightly different
    # values for sustained and transient predictors, but close enough not to
    # matter). This scaling factor needs to be applied after the procedure
    # described in the FSL documentation. Thus, the final PSC is calculated as
    # follows: `(PE * (100 * peak-peak height) / tmean) * 1.4`. The pp-height
    # is obtained from `design.mat`.
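    # As a hypothetical worked example: a PE of 5.0 with a peak-peak height
    # of 1.268 and a temporal mean of 10000 would give
    # 5.0 * (100 * 1.268) / 10000 = 0.0634 percent signal change, before the
    # additional ~1.4 baseline scaling factor described above.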

    # Only perform scaling if the data is from an FSL cope file:
    if (('cope' in lstVtkDpth01[0]) or ('_pe' in lstVtkDpth01[0])):
        if idxPrc == 0:
            print('---------Convert cope to percent signal change.')

        # The peak-peak height depends on the predictor (i.e. condition).
        if 'sst' in lstVtkDpth01[0]:
            varPpheight = 1.268049
        elif 'trn' in lstVtkDpth01[0]:
            varPpheight = 0.2269044
        else:
            if idxPrc == 0:
                print(('------------WARNING: Cannot determine condition from '
                       + 'file name, peak-peak height of the regressor is set '
                       + 'to 1.'))
            varPpheight = 1.0

        # Loop through input data files:
        for idxIn in range(0, varNumCon):

            # Get PEs:
            aryTmp = lstDpthData01[idxIn].astype(np.float64)

            # In order to avoid division by zero, avoid zero-voxels:
            lgcTmp = np.not_equal(arySlct03, 0.0)

            # Apply PSC scaling, as described above:
            aryTmp[lgcTmp] = np.multiply(
                                         np.divide(
                                                   np.multiply(aryTmp[lgcTmp],
                                                               (100.0
                                                                * varPpheight)
                                                               ),
                                                   arySlct03[lgcTmp]),
                                         1.0  # 1.4
                                         )

            # Put scaled PEs back into list (now PSC with respect to
            # pre-stimulus baseline):
            lstDpthData01[idxIn] = aryTmp
    # *************************************************************************

    # *************************************************************************
    # *** Select vertices

    lstDpthData01, varNumInc, vecInc = \
        slct_vrtcs(varNumCon,           # Number of conditions
                   lstDpthData01,       # List with depth-sampled data I
                   lgcSlct01,           # Criterion 1 - Yes or no?
                   aryRoiVrtx,          # Criterion 1 - Data (ROI)
                   lgcSlct02,           # Criterion 2 - Yes or no?
                   arySlct02,           # Criterion 2 - Data
                   varThrSlct02,        # Criterion 2 - Threshold
                   lgcSlct03,           # Criterion 3 - Yes or no?
                   arySlct03,           # Criterion 3 - Data
                   varThrSlct03,        # Criterion 3 - Threshold
                   lgcSlct04,           # Criterion 4 - Yes or no?
                   arySlct04,           # Criterion 4 - Data
                   tplThrSlct04,        # Criterion 4 - Threshold
                   idxPrc)              # Process ID
    # *************************************************************************

    # *************************************************************************
    # *** Create VTK mesh mask

    if idxPrc == 0:
        print('---------Creating VTK mesh mask.')

    # We would like to be able to visualise the selected vertices on the
    # cortical surface, i.e. on a vtk mesh.
    vtk_msk(strSubId,         # Data struc - Subject ID
            lstVtkDpth01[0],  # Data struc - Path first data vtk file
            strPrcdData,      # Data struc - Str. prcd. VTK data
            varNumLne,        # Data struc - Lns. prcd. data VTK
            strCsvRoi,        # Data struc - ROI CSV fle (outpt. naming)
            vecInc,           # Vector with included vertices
            strMetaCon)       # Metacondition (stimulus or periphery)
    # *************************************************************************

    # *************************************************************************
    # *** Calculate mean & confidence interval

    if idxPrc == 0:
        print('---------Plot results - mean over vertices.')

    # Prepare arrays for results (mean & confidence interval):
    aryDpthMean = np.zeros((varNumCon, varNumDpth))
    aryDpthConf = np.zeros((varNumCon, varNumDpth))

    # Fill array with data - loop through input files:
    for idxIn in range(0, varNumCon):

        # Loop through depth levels:
        for idxDpth in range(0, varNumDpth):

            # Avoid warning in case of empty array (i.e. no vertices included
            # in ROI for current ROI/subject/hemisphere):
            if np.greater(np.sum(vecInc), 0):

                # Retrieve all vertex data for current input file & current
                # depth level:
                aryTmp = lstDpthData01[idxIn][:, idxDpth]

                # Calculate mean over vertices:
                varTmp = np.mean(aryTmp)

            else:

                # No vertices in ROI:
                varTmp = 0.0

            # Place mean in array:
            aryDpthMean[idxIn, idxDpth] = varTmp

            # Calculate 95% confidence interval for the mean, obtained by
            # multiplying the standard error of the mean (SEM) by 1.96. We
            # obtain the SEM by dividing the standard deviation by the
            # square root of the sample size n. We get n by taking 1/8 of the
            # number of vertices, which corresponds to the number of voxels
            # in native resolution. (Guard against an empty ROI, in which
            # case the confidence interval is set to zero as well.)
            if np.greater(np.sum(vecInc), 0):
                varTmp = np.multiply(np.divide(np.std(aryTmp),
                                               np.sqrt(aryTmp.size * 0.125)),
                                     1.96)
            else:
                varTmp = 0.0

            # Place confidence interval in array:
            aryDpthConf[idxIn, idxDpth] = varTmp
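            # As an illustration (made-up numbers): with 800 included
            # vertices, n = 800 / 8 = 100; a standard deviation of 8.0 over
            # vertices then gives SEM = 8.0 / sqrt(100) = 0.8 and a 95%
            # confidence interval of 1.96 * 0.8 = 1.568.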

            # Calculate standard error of the mean.
            # varTmp = np.divide(np.std(aryTmp),
            #                    np.sqrt(aryTmp.size * 0.125))
            # Place SEM in array:
            # aryDpthConf[idxIn, idxDpth] = varTmp

            # Calculate standard deviation over vertices:
            # varTmp = np.std(aryTmp)

            # Place standard deviation in array:
            # aryDpthConf[idxIn, idxDpth] = varTmp

    # Normalise by division:
    if lgcNormDiv:

        if idxPrc == 0:
            print('---------Normalisation by division.')

        # Vector for subtraction:
        # vecSub = np.array(aryDpthMean[varNormIdx, :], ndmin=2)
        # Divide all rows by reference row:
        # aryDpthMean = np.divide(aryDpthMean, vecSub)

        # Calculate 'grand mean', i.e. the mean PE across depth levels and
        # conditions:
        varGrndMean = np.mean(aryDpthMean)
        # varGrndMean = np.median(aryDpthMean)

        # Divide all values by the grand mean:
        aryDpthMean = np.divide(np.absolute(aryDpthMean), varGrndMean)
        aryDpthConf = np.divide(np.absolute(aryDpthConf), varGrndMean)
    # *************************************************************************

    # *************************************************************************
    # *** Create plot

    if False:

        # File name for figure:
        strPltOt = strPltOtPre + strSubId + strPltOtSuf

        # Title, including information about number of vertices:
        strTitleTmp = (strTitle
                       + ', '
                       + str(varNumInc)
                       + ' vertices')

        plt_dpth_prfl(aryDpthMean,  # Data: aryData[Condition, Depth]
                      aryDpthConf,  # Error shading: aryError[Condition, Depth]
                      varNumDpth,   # Number of depth levels (on the x-axis)
                      varNumCon,    # Number of conditions (separate lines)
                      varDpi,       # Resolution of the output figure
                      varYmin,      # Minimum of Y axis
                      varYmax,      # Maximum of Y axis
                      False,        # Boolean: whether to convert y axis to %
                      lstConLbl,    # Labels for conditions (separate lines)
                      strXlabel,    # Label on x axis
                      strYlabel,    # Label on y axis
                      strTitleTmp,  # Figure title
                      True,         # Boolean: whether to plot a legend
                      strPltOt)
    # *************************************************************************

    # *************************************************************************
    # *** Return

    # Output list:
    lstOut = [idxPrc,
              aryDpthMean,
              varNumInc]

    queOut.put(lstOut)
Example #48
0
            im = ns.add_noise(original, noise_type, noise, max_val=255 / scaling)
            misc.save_image(im * scaling, "{}.png".format(noisyImName))
            im, scaling2 = misc.get_image("{}.png".format(noisyImName), n)
            im *= scaling2 / scaling
        shift = im.ravel().min() - 1e-10
        if shift > 0:
            shift = 0

        sparsities = []
        method_outputs = initialize_method_outputs(W_SOFT, W_HARD, SOFT, HARD)

        # compute sparsity paths
        for lamb in lambdas:
            _, Z, obj = ot_sparse_projection. \
                wasserstein_image_filtering_invertible_dictionary(im - shift, filter_handler, gamma, lamb * mask)
            sparsity_pattern = np.not_equal(0, Z)
            _, Z_wasserstein_hard, obj_hard = ot_sparse_projection. \
                OtFilteringSpecificPattern(filter_handler, gamma, sparsity_pattern, ).projection(im - shift)
            add_values(method_outputs, W_SOFT, Z)
            add_values(method_outputs, W_HARD, Z_wasserstein_hard)
            sparsity = misc.get_sparsity(Z)
            sparsities.append(sparsity)
            Y_l2, Z_l2 = l2.sparse_projection(im, filter_handler, sparsity)
            Y_l2_hard, Z_l2_hard = l2.hard_thresholding(im, filter_handler, sparsity)
            add_values(method_outputs, HARD, Z_l2_hard)
            add_values(method_outputs, SOFT, Z_l2)

        # plot scores
        for key, sim in similarities.items():
            table_scores[noise_type][noise][key] = {}
            plt.figure(figsize=[9., 3.])
Example #49
0
def num_changed(old, new):
    return np.sum(np.not_equal(old, new))
Example #50
0
def onp_not_equal(a, b):
    return onp.not_equal(a, b)
Example #51
0
def test_batch_iter_with_shuffle():
    data = np.arange(36).reshape(12, 3)
    bi = iterator.BatchIterator(4, True)
    data2 = np.vstack([items[0] for items in bi(data)])
    assert_equal(np.any(np.not_equal(data, data2)), True)
    assert_array_equal(data, np.sort(data2, axis=0))
Example #52
0
    def get_batch(self, batch_size):
        in_data = []
        slot_data = []
        slot_weight = []
        length = []
        intents = []

        batch_in = []
        batch_slot = []
        max_len = 0

        # Used to record words (not ids):
        in_seq = []
        slot_seq = []
        intent_seq = []
        for i in range(batch_size):
            inp = self.__fd_in.readline()
            if inp == '':
                self.end = 1
                break
            slot = self.__fd_slot.readline()
            intent = self.__fd_intent.readline()
            inp = inp.rstrip()
            slot = slot.rstrip()
            intent = intent.rstrip()

            in_seq.append(inp)
            slot_seq.append(slot)
            intent_seq.append(intent)

            iii = inp
            sss = slot
            inp = sentenceToIds(inp, self.__in_vocab)
            slot = sentenceToIds(slot, self.__slot_vocab)
            intent = sentenceToIds(intent, self.__intent_vocab)
            batch_in.append(np.array(inp))
            batch_slot.append(np.array(slot))
            length.append(len(inp))
            intents.append(intent[0])
            if len(inp) != len(slot):
                print(iii, sss)
                print(inp, slot)
                exit(0)
            if len(inp) > max_len:
                max_len = len(inp)

        length = np.array(length)
        intents = np.array(intents)
        #print(max_len)
        #print('A'*20)
        for i, s in zip(batch_in, batch_slot):
            in_data.append(padSentence(list(i), max_len, self.__in_vocab))
            slot_data.append(padSentence(list(s), max_len, self.__slot_vocab))
            #print(s)
        in_data = np.array(in_data)
        slot_data = np.array(slot_data)
        #print(in_data)
        #print(slot_data)
        #print(type(slot_data))
        for s in slot_data:
            weight = np.not_equal(s, np.zeros(s.shape))
            weight = weight.astype(np.float32)
            slot_weight.append(weight)
        slot_weight = np.array(slot_weight)
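        # As an illustration (made-up ids): for a padded slot row
        # s = [4, 7, 0, 0], np.not_equal(s, 0) gives [1, 1, 0, 0] after the
        # float cast, so padding positions get zero weight in the loss.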
        return in_data, slot_data, slot_weight, length, intents, in_seq, slot_seq, intent_seq
Example #53
0
with open('/data/y_raw.pickle', 'rb') as f:
    output = pickle.load(f)
print(len(output))
print("Data loaded\n")

# Split data
train_x, test_x = train_test_split(input, test_size=0.1, random_state=42)
train_y, test_y = train_test_split(output, test_size=0.1, random_state=42)

clf = Pipeline([('classif', RandomForestClassifier())])

clf.fit(train_x, train_y)

predicted = clf.predict(test_x)
print('Correct predictions: {:4.2f}'.format(np.mean(predicted == test_y)))

print("Accuracy:", metrics.accuracy_score(test_y, predicted))
print("Precision",
      metrics.precision_score(test_y, predicted, average='weighted'))
print("Recall:", metrics.recall_score(test_y, predicted, average='weighted'))
print("F1:", metrics.f1_score(test_y, predicted, average='weighted'))

# Hamming loss: the fraction of label assignments that are wrong, i.e. the
# number of mismatched labels divided by (number of samples * number of
# labels):
incorrect = np.not_equal(predicted, test_y)
misclass = np.count_nonzero(incorrect)
hamm_loss = misclass / (n_classes * len(test_x))

print("Hamming loss:,", hamm_loss)
print("Verify hamming loss value against sklearn library:",
      metrics.hamming_loss(test_y, predicted))
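# As an illustration (made-up numbers): with 2 samples carrying 3 labels
# each, a single mismatched label gives a Hamming loss of 1 / (2 * 3) = 0.1667.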
Example #54
0
f = file.as_matrix()


# The training dataset in array form
X_train = f[:split, 1:]
y_train = f[:split, 0]

# The Cross validation set
X_cv = f[split: , 1:]
y_cv = f[split: , 0]



# Declaring error arrays
error_cv = []

import numpy as np
for layer_size in [200, 225, 250, 275]:
    for reg in [10, 25, 50, 75, 100]:

        nn = MLPClassifier(solver='lbfgs', alpha=reg, activation='logistic',
                           hidden_layer_sizes=(layer_size,))
        nn.fit(X_train[:m, ], y_train[:m, ])

        y_pred = nn.predict(X_cv)
        error_cv.append(sum(np.not_equal(y_pred, y_cv)) / len(y_pred))


# Index of the smallest cross-validation error. The inner loop iterates over
# five alpha values, so the flat index decomposes into the layer-size index
# (integer division) and the alpha index (remainder).
min_index = int(np.argmin(error_cv))
print("Layer_size: ", [200, 225, 250, 275][min_index // 5])
print("alpha: ", [10, 25, 50, 75, 100][min_index % 5])
Example #55
0
def read_data(filename):
    """Reads Intan Technologies RHD2000 data file generated by evaluation board GUI.
    
    Data are returned in a dictionary, for future extensibility.
    """

    tic = time.time()
    fid = open(filename, 'rb')
    filesize = os.path.getsize(filename)

    header = read_header(fid)

    print('Found {} amplifier channel{}.'.format(
        header['num_amplifier_channels'],
        plural(header['num_amplifier_channels'])))
    print('Found {} auxiliary input channel{}.'.format(
        header['num_aux_input_channels'],
        plural(header['num_aux_input_channels'])))
    print('Found {} supply voltage channel{}.'.format(
        header['num_supply_voltage_channels'],
        plural(header['num_supply_voltage_channels'])))
    print('Found {} board ADC channel{}.'.format(
        header['num_board_adc_channels'],
        plural(header['num_board_adc_channels'])))
    print('Found {} board digital input channel{}.'.format(
        header['num_board_dig_in_channels'],
        plural(header['num_board_dig_in_channels'])))
    print('Found {} board digital output channel{}.'.format(
        header['num_board_dig_out_channels'],
        plural(header['num_board_dig_out_channels'])))
    print('Found {} temperature sensors channel{}.'.format(
        header['num_temp_sensor_channels'],
        plural(header['num_temp_sensor_channels'])))
    print('')

    # Determine how many samples the data file contains.
    bytes_per_block = get_bytes_per_data_block(header)

    # How many data blocks remain in this file?
    data_present = False
    bytes_remaining = filesize - fid.tell()
    if bytes_remaining > 0:
        data_present = True

    if bytes_remaining % bytes_per_block != 0:
        raise Exception(
            'Something is wrong with file size : should have a whole number of data blocks'
        )

    num_data_blocks = int(bytes_remaining / bytes_per_block)

    num_amplifier_samples = 60 * num_data_blocks
    num_aux_input_samples = 15 * num_data_blocks
    num_supply_voltage_samples = 1 * num_data_blocks
    num_board_adc_samples = 60 * num_data_blocks
    num_board_dig_in_samples = 60 * num_data_blocks
    num_board_dig_out_samples = 60 * num_data_blocks

    record_time = num_amplifier_samples / header['sample_rate']

    if data_present:
        print(
            'File contains {:0.3f} seconds of data.  Amplifiers were sampled at {:0.2f} kS/s.'
            .format(record_time, header['sample_rate'] / 1000))
    else:
        print(
            'Header file contains no data.  Amplifiers were sampled at {:0.2f} kS/s.'
            .format(header['sample_rate'] / 1000))

    if data_present:
        # Pre-allocate memory for data.
        print('')
        print('Allocating memory for data...')

        data = {}
        if (header['version']['major'] == 1 and header['version']['minor'] >= 2
            ) or (header['version']['major'] > 1):
            data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.int)
        else:
            data['t_amplifier'] = np.zeros(num_amplifier_samples,
                                           dtype=np.uint)

        data['amplifier_data'] = np.zeros(
            [header['num_amplifier_channels'], num_amplifier_samples],
            dtype=np.uint)
        data['aux_input_data'] = np.zeros(
            [header['num_aux_input_channels'], num_aux_input_samples],
            dtype=np.uint)
        data['supply_voltage_data'] = np.zeros([
            header['num_supply_voltage_channels'], num_supply_voltage_samples
        ],
                                               dtype=np.uint)
        data['temp_sensor_data'] = np.zeros(
            [header['num_temp_sensor_channels'], num_supply_voltage_samples],
            dtype=np.uint)
        data['board_adc_data'] = np.zeros(
            [header['num_board_adc_channels'], num_board_adc_samples],
            dtype=np.uint)
        data['board_dig_in_data'] = np.zeros(
            [header['num_board_dig_in_channels'], num_board_dig_in_samples],
            dtype=np.uint)
        data['board_dig_in_raw'] = np.zeros(num_board_dig_in_samples,
                                            dtype=np.uint)
        data['board_dig_out_data'] = np.zeros(
            [header['num_board_dig_out_channels'], num_board_dig_out_samples],
            dtype=np.uint)
        data['board_dig_out_raw'] = np.zeros(num_board_dig_out_samples,
                                             dtype=np.uint)

        # Read sampled data from file.
        print('Reading data from file...')

        # Initialize indices used in looping
        indices = {}
        indices['amplifier'] = 0
        indices['aux_input'] = 0
        indices['supply_voltage'] = 0
        indices['board_adc'] = 0
        indices['board_dig_in'] = 0
        indices['board_dig_out'] = 0

        print_increment = 10
        percent_done = print_increment
        for i in range(num_data_blocks):
            read_one_data_block(data, header, indices, fid)

            # Increment indices
            indices['amplifier'] += 60
            indices['aux_input'] += 15
            indices['supply_voltage'] += 1
            indices['board_adc'] += 60
            indices['board_dig_in'] += 60
            indices['board_dig_out'] += 60

            fraction_done = 100 * (1.0 * i / num_data_blocks)
            if fraction_done >= percent_done:
                print('{}% done...'.format(percent_done))
                percent_done = percent_done + print_increment

        # Make sure we have read exactly the right amount of data.
        bytes_remaining = filesize - fid.tell()
        if bytes_remaining != 0:
            raise Exception('Error: End of file not reached.')

    # Close data file.
    fid.close()

    if (data_present):
        print('Parsing data...')

        # Extract digital input channels to separate variables.
        for i in range(header['num_board_dig_in_channels']):
            data['board_dig_in_data'][i, :] = np.not_equal(
                np.bitwise_and(
                    data['board_dig_in_raw'],
                    (1 << header['board_dig_in_channels'][i]['native_order'])),
                0)

        # Extract digital output channels to separate variables.
        for i in range(header['num_board_dig_out_channels']):
            data['board_dig_out_data'][i, :] = np.not_equal(
                np.bitwise_and(data['board_dig_out_raw'], (
                    1 << header['board_dig_out_channels'][i]['native_order'])),
                0)
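        # As an illustration of the bit test above (made-up numbers): if a
        # raw digital word is 0b0101 and a channel's native_order is 2, then
        # 0b0101 & (1 << 2) = 0b0100, which np.not_equal(..., 0) turns into
        # True, i.e. that channel was high for this sample.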

        # Scale voltage levels appropriately.
        data['amplifier_data'] = np.multiply(
            0.195, (data['amplifier_data'].astype(np.int32) -
                    32768))  # units = microvolts
        data['aux_input_data'] = np.multiply(
            37.4e-6, data['aux_input_data'])  # units = volts
        data['supply_voltage_data'] = np.multiply(
            74.8e-6, data['supply_voltage_data'])  # units = volts
        if header['eval_board_mode'] == 1:
            data['board_adc_data'] = np.multiply(
                152.59e-6, (data['board_adc_data'].astype(np.int32) -
                            32768))  # units = volts
        else:
            data['board_adc_data'] = np.multiply(
                50.354e-6, data['board_adc_data'])  # units = volts
        data['temp_sensor_data'] = np.multiply(
            0.01, data['temp_sensor_data'])  # units = deg C

        # Check for gaps in timestamps.
        num_gaps = np.sum(
            np.not_equal(data['t_amplifier'][1:] - data['t_amplifier'][:-1],
                         1))
        if num_gaps == 0:
            print('No missing timestamps in data.')
        else:
            print(
                'Warning: {0} gaps in timestamp data found.  Time scale will not be uniform!'
                .format(num_gaps))

        # Scale time steps (units = seconds).
        data['t_amplifier'] = data['t_amplifier'] / header['sample_rate']
        data['t_aux_input'] = data['t_amplifier'][range(
            0, len(data['t_amplifier']), 4)]
        data['t_supply_voltage'] = data['t_amplifier'][range(
            0, len(data['t_amplifier']), 60)]
        data['t_board_adc'] = data['t_amplifier']
        data['t_dig'] = data['t_amplifier']
        data['t_temp_sensor'] = data['t_supply_voltage']

        # If the software notch filter was selected during the recording, apply the
        # same notch filter to amplifier data here.
        if header['notch_filter_frequency'] > 0:
            print('Applying notch filter...')

            print_increment = 10
            percent_done = print_increment
            for i in range(header['num_amplifier_channels']):
                data['amplifier_data'][i, :] = notch_filter(
                    data['amplifier_data'][i, :], header['sample_rate'],
                    header['notch_filter_frequency'], 10)

                fraction_done = 100 * (i / header['num_amplifier_channels'])
                if fraction_done >= percent_done:
                    print('{}% done...'.format(percent_done))
                    percent_done += print_increment
    else:
        data = []

    # Move variables to result struct.
    result = data_to_result(header, data, data_present)

    print('Done!  Elapsed time: {0:0.1f} seconds'.format(time.time() - tic))
    return result
Example #56
0
File: qctests.py  Project: cgodine/ACT
    def add_not_equal_to_test(self, var_name, limit_value, test_meaning=None,
                              test_assessment='Bad', test_number=None,
                              flag_value=False, limit_attr_name=None,
                              prepend_text=None):
        """
        Method to perform a not equal to test and add result to ancillary
        quality control variable. If ancillary quality control variable does
        not exist it will be created.

        Parameters
        ----------
        var_name : str
            Data variable name.
        limit_value : int or float or None
            Limit value to use in test. The value will be written
            to the quality control variable as an attribute. If set
            to None, the method returns without setting the test.
        test_meaning : str
            The optional text description to add to flag_meanings
            describing the test. Will add a default if not set.
        test_assessment : str
            Optional single word describing the assessment of the test.
            Will set a default if not set.
        test_number : int
            Optional test number to use. If not set, the next
            available test number will be used.
        flag_value : boolean
            Indicates that the tests are stored as integers,
            not bit-packed values, in the quality control variable.
        limit_attr_name : str
            Optional attribute name under which to store the
            limit_value in the quality control ancillary variable.
        prepend_text : str
            Optional text to prepend to the test meaning, e.g. to
            indicate which institution added the test.

        Returns
        -------
        test_info : tuple
            A tuple containing test information including var_name, qc variable name,
            test_number, test_meaning, test_assessment

        """
        if limit_value is None:
            return

        if limit_attr_name is None:
            if test_assessment == 'Suspect' or test_assessment == 'Indeterminate':
                attr_name = 'warn_not_equal_to'
            else:
                attr_name = 'fail_not_equal_to'
        else:
            attr_name = limit_attr_name

        if test_meaning is None:
            test_meaning = 'Data value not equal to {}.'.format(attr_name)

        if prepend_text is not None:
            test_meaning = ': '.join((prepend_text, test_meaning))

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            index = np.not_equal(self._obj[var_name].values, limit_value)

        result = self._obj.qcfilter.add_test(
            var_name, index=index,
            test_number=test_number,
            test_meaning=test_meaning,
            test_assessment=test_assessment,
            flag_value=flag_value)

        # Ensure limit_value attribute is matching data type
        limit_value = np.array(limit_value, dtype=self._obj[var_name].values.dtype.type)

        qc_var_name = result['qc_variable_name']
        self._obj[qc_var_name].attrs[attr_name] = limit_value

        return result
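    # A minimal usage sketch for the method above (hypothetical file and
    # variable names; the exact reader function may differ between ACT
    # versions):
    #
    #     import act
    #     ds = act.io.armfiles.read_netcdf('sgpmetE13.b1.20190101.000000.cdf')
    #     result = ds.qcfilter.add_not_equal_to_test(
    #         'temp_mean', -9999.0,
    #         test_meaning='Value equal to missing-value indicator.',
    #         test_assessment='Bad')
    #     qc_var = result['qc_variable_name']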
Example #57
0
 def binary_op(self, op, rhs1, rhs2, where, args, stacklevel):
     if self.shadow:
         rhs1 = self.runtime.to_eager_array(
             rhs1, stacklevel=(stacklevel + 1)
         )
         rhs2 = self.runtime.to_eager_array(
             rhs2, stacklevel=(stacklevel + 1)
         )
         if where is not None and isinstance(where, NumPyThunk):
             where = self.runtime.to_eager_array(
                 where, stacklevel=(stacklevel + 1)
             )
     elif self.deferred is None:
         if where is not None and isinstance(where, NumPyThunk):
             self.check_eager_args((stacklevel + 1), rhs1, rhs2, where)
         else:
             self.check_eager_args((stacklevel + 1), rhs1, rhs2)
     if self.deferred is not None:
         self.deferred.binary_op(
             op, rhs1, rhs2, where, args, stacklevel=(stacklevel + 1)
         )
     else:
         if op == NumPyOpCode.ADD:
             np.add(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.LOGICAL_AND:
             np.logical_and(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.DIVIDE:
             np.divide(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.EQUAL:
             np.equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.FLOOR_DIVIDE:
             np.floor_divide(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.GREATER_EQUAL:
             np.greater_equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.GREATER:
             np.greater(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         # elif op == NumPyOpCode.SHIFT_LEFT:
         #    np.left_shift(rhs1.array, rhs2.array, out=self.array,
         #            where=where if not isinstance(where, EagerArray)
         #                        else where.array)
         # elif op == NumPyOpCode.SHIFT_RIGHT:
         #    np.right_shift(rhs1.array, rhs2.array, out=self.array,
         #            where=where if not isinstance(where, EagerArray)
         #                        else where.array)
         elif op == NumPyOpCode.MOD:
             np.mod(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.MULTIPLY:
             np.multiply(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.LOGICAL_OR:
             np.logical_or(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.POWER:
             np.power(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.SUBTRACT:
             np.subtract(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.LOGICAL_XOR:
             np.logical_xor(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.LESS_EQUAL:
             np.less_equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.LESS:
             np.less(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.MAXIMUM:
             np.maximum(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.MINIMUM:
             np.minimum(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         elif op == NumPyOpCode.NOT_EQUAL:
             np.not_equal(
                 rhs1.array,
                 rhs2.array,
                 out=self.array,
                 where=where
                 if not isinstance(where, EagerArray)
                 else where.array,
             )
         else:
             raise RuntimeError("unsupported binary op " + str(op))
         self.runtime.profile_callsite(stacklevel + 1, False)
Example #58
0
    def fit(self):
        """
            Trains a model
        """
        if self.optimizers is None:
            self.setup_optimizers()

        num_train = self.dataset.train_X.shape[0]
        
        loss_history = []
        train_acc_history = []
        val_acc_history = []
        first_layer_W = {}

        for param_name, _ in self.model.params().items():
            first_layer_W[param_name]=[]
        
        for epoch in range(self.num_epochs):
            shuffled_indices = np.arange(num_train)
            np.random.shuffle(shuffled_indices)
            sections = np.arange(self.batch_size, num_train, self.batch_size)
            batches_indices = np.array_split(shuffled_indices, sections)
            batch_losses = []

            for batch_indices in batches_indices:
                # TODO Generate batches based on batch_indices and
                # use model to generate loss and gradients for all
                # the params

                batch = self.dataset.train_X[batch_indices]
                label = self.dataset.train_y[batch_indices]

                loss = self.model.compute_loss_and_gradients(batch, label)
                batch_losses.append(loss)
                
                for param_name, param in self.model.params().items():
                    optimizer = self.optimizers[param_name]
                    # print('PARAM PRE', param_name)
                    first_layer_W[param_name].append(param.value.sum())
                    param.value = optimizer.update(param.value, param.grad, self.learning_rate)
                    # print('PARAM POST', param_name)
                    # print(param.value.sum(), '\n=========')
                    
        
            if np.not_equal(self.learning_rate_decay, 1.0):

                # Apply learning rate decay:
                self.learning_rate *= self.learning_rate_decay

            ave_loss = np.mean(batch_losses)

            train_accuracy = self.compute_accuracy(self.dataset.train_X,
                                                   self.dataset.train_y)

            val_accuracy = self.compute_accuracy(self.dataset.val_X,
                                                 self.dataset.val_y)

            print("Loss: %f, Train accuracy: %f, val accuracy: %f" %
                  (batch_losses[-1], train_accuracy, val_accuracy))

            loss_history.append(ave_loss)
            train_acc_history.append(train_accuracy)
            val_acc_history.append(val_accuracy)

        return loss_history, train_acc_history, val_acc_history, first_layer_W
Example #59
0
    # *** Normalisation

    if lgcNorm:

        # Get prestimulus baseline:
        aryBse = np.copy(aryTmpBlcks[:, :, :, :,
                                     int(varVolsPre + tplBase[0]):
                                     int(varVolsPre + tplBase[1])]
                         ).astype(np.float32)

        # Mean for each voxel over time (i.e. over the pre-stimulus
        # baseline):
        aryBseMne = np.mean(aryBse, axis=4).astype(np.float32)

        # Get indices of voxels that have a non-zero pre-stimulus baseline:
        aryNonZero = np.not_equal(aryBseMne, 0.0)

        # Divide all voxels that are non-zero in the pre-stimulus baseline by
        # the prestimulus baseline:
        aryTmpBlcks[aryNonZero] = np.divide(aryTmpBlcks[aryNonZero],
                                            aryBseMne[aryNonZero, None]
                                            ).astype(np.float32)

    # -------------------------------------------------------------------------
    # *** Save segment

    if lgcSegs:

        # Loop through runs again in order to save segments:
        for index_03 in range(0, varTmpNumBlck):
Example #60
0
def vis_image_pair_opencv(im_list,
                          boxes_list,
                          segms_list=None,
                          keypoints_list=None,
                          track=None,
                          thresh=0.9,
                          kp_thresh=2,
                          track_thresh=0.7,
                          show_box=False,
                          dataset=None,
                          show_class=False,
                          show_track=False,
                          show_track_ids=False,
                          colors=None,
                          color_inds_list=None):
    """Visualize object associations in an image pair."""

    classes_list = []
    keep_idx = [[], []]
    sorted_inds_list = []
    ret = []
    color_inds_list_new = [None, None]
    masks_list = []

    for i, im in enumerate(im_list):
        boxes = boxes_list[i]
        segms = None if segms_list is None else segms_list[i]
        keypoints = None if keypoints_list is None else keypoints_list[i]

        if isinstance(boxes, list):
            boxes, segms, keypoints, classes = convert_from_cls_format(
                boxes, segms, keypoints)
            boxes_list[i] = boxes
            segms_list[i] = segms
            keypoints_list[i] = keypoints
            classes_list.append(classes)

        if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
            if im is not None:
                ret.append(im)
            continue

        if segms is not None and len(segms) > 0:
            masks_list.append(mask_util.decode(segms))

        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)
        sorted_inds_list.append(sorted_inds)

        for idx in sorted_inds:
            bbox = boxes[idx, :4]
            score = boxes[idx, -1]
            if score >= thresh:
                keep_idx[i].append(idx)

    n_rois = len(boxes_list[0])
    m_rois = len(boxes_list[1])

    assert (len(sorted_inds_list[0]) == n_rois)
    assert (len(sorted_inds_list[1]) == m_rois)
    assert (track.shape == (n_rois, m_rois))

    if color_inds_list is None:
        color_inds_list = [None, None]
    for i in xrange(2):
        if color_inds_list[i] is not None:
            assert (len(color_inds_list[i]) == len(boxes_list[i]))
        else:
            color_inds_list[i] = [None] * len(boxes_list[i])
        color_inds_list_new[i] = [None] * len(boxes_list[i])

    assign_inds_one = []

    track_prob_mat = np.zeros((n_rois, m_rois))
    for i in keep_idx[0]:
        for j in keep_idx[1]:
            track_prob_mat[i, j] = track[m_rois * i + j]
    track_prob_mat = np.where(track_prob_mat > track_thresh, track_prob_mat,
                              np.zeros((n_rois, m_rois)))
    track_prob_mat = np.where(
        np.array([[class_one == class_two for class_two in classes_list[1]]
                  for class_one in classes_list[0]]), track_prob_mat,
        np.zeros((n_rois, m_rois)))
    assign_inds_list = linear_sum_assignment(-track_prob_mat)

    if colors is None:
        colors = distinct_colors(min(len(keep_idx[0]), len(keep_idx[1])))

    for i, im in enumerate(im_list):
        boxes = boxes_list[i]
        classes = classes_list[i]
        assign_inds = assign_inds_list[i]
        if i == 1:
            color_inds_list[0] = color_inds_list_new[0]
        for idx in keep_idx[i]:
            bbox = boxes[idx, :4]
            score = boxes[idx, -1]
            if idx not in assign_inds:
                continue
            i_other = (0 if i == 1 else 1)
            assign_inds_other = assign_inds_list[i_other]
            i_assign = assign_inds.tolist().index(idx)
            idx_other = assign_inds_other[i_assign]
            if idx_other not in keep_idx[i_other]:
                continue

            if i == 0:
                assign_inds_one.append(i_assign)

            i_color = color_inds_list[i][idx]
            if i_color is None:
                if color_inds_list[i_other][idx_other] is None:
                    color_inds = np.unique([
                        x
                        for x in color_inds_list[i_other] + color_inds_list[i]
                        if x is not None
                    ])
                    if len(color_inds):
                        i_small_idx = np.where(
                            np.not_equal(color_inds,
                                         np.array(range(len(color_inds)))))[0]
                        if len(i_small_idx):
                            i_color = i_small_idx[0]
                        else:
                            i_color = max(color_inds) + 1
                    else:
                        i_color = assign_inds_one.index(i_assign)
                    assert (i_color not in color_inds)
                else:
                    i_color = color_inds_list[i_other][idx_other]
            assert (i_color is not None)

            color_inds_list_new[i][idx] = i_color

            if im is not None:
                if i == 0:
                    track_prob = track_prob_mat[idx, idx_other]
                else:
                    track_prob = track_prob_mat[idx_other, idx]
                # show box (off by default)
                if track_prob < track_thresh:
                    color = (127.5, 127.5, 127.5)
                    thick = 1
                else:
                    color = colors[i_color]
                    thick = 2
                if show_box:
                    im = vis_bbox(im, (bbox[0], bbox[1], bbox[2] - bbox[0],
                                       bbox[3] - bbox[1]),
                                  color=color,
                                  thick=thick)

                # show class (off by default)
                class_str = ""
                if show_class:
                    class_str = get_class_string(classes[idx], score, dataset)
                    if show_track:
                        class_str += " | "
                if show_track:
                    class_str += "{:.2f}".format(track_prob)
                    if show_track_ids:
                        class_str += ", {} --> {}".format(idx, idx_other)
                if show_class or show_track:
                    im = vis_class(im, (bbox[0], bbox[1] - 2), class_str)

                # show mask
                if segms is not None and len(segms) > idx:
                    color_mask = np.array(colors[i_color])
                    im = vis_mask(im, masks_list[i][..., idx], color_mask)

                # show keypoints
                if keypoints is not None and len(keypoints) > idx:
                    im = vis_keypoints(im, keypoints_list[i][idx], 2)

        if im is not None:
            im = vis_class(im, (10, 20), str(i))
            ret.append(im)

    ret.append(track_prob_mat)
    ret += color_inds_list_new

    return ret