Example #1
def Compare_Dihedral(Dihedral):
    dx = 0.0001
    N = len(Dihedral)
    print(N)
    Dih = np.arange(0, 2*3.1415, dx)
    U1 = np.full_like(Dih, 0.0)
    U2 = np.full_like(Dih, 0.0)
    U3 = np.full_like(Dih, 0.0)
    
    for i in range(N):
        U1 += Dihedral[i]*np.cos(Dih)**i
    
    for i in range(5):
        print(i)
        U2 += Dihedral[i]*np.cos(Dih)**i
    
    Popt, Pcov = curve_fit(Multi, Dih, U1)

    for i in range(5):
        U3 += Popt[i]*np.cos(Dih)**i
    
    print(Popt)
    plt.figure()
    plt.plot(Dih, U1, label = 'Full Potential')
    plt.plot(Dih, U2, label = 'Truncated Approximation')
    plt.plot(Dih, U3, label = 'Optimized Approximation')
    plt.ylim((U2.min(), U2.max()))
    plt.xlim((0.0, 2*3.1415))
    plt.title('P2P1P1P2', fontsize = 30)
    plt.xlabel('Dihedral Angle (radians)', fontsize = 20)
    plt.ylabel('Energy (Kcal/mol)', fontsize = 20)
    plt.legend()
    plt.show()
    return
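The call to curve_fit above passes a Multi model function that is not shown in this snippet. As a hypothetical sketch only (assuming numpy is imported as np), such a five-coefficient cosine series could look like this:

def Multi(phi, c0, c1, c2, c3, c4):
    # Hypothetical stand-in for the fitting model assumed by Compare_Dihedral:
    # a truncated cosine power series whose coefficients curve_fit optimizes.
    return (c0 + c1*np.cos(phi) + c2*np.cos(phi)**2
            + c3*np.cos(phi)**3 + c4*np.cos(phi)**4)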
Example #2
def Gen_PDF_CDF_OPLS( V, Beta ):
	"""
	This function takes in a numpy array V that contains the energetic coefficients for the OPLS-style dihedral potential of the form:
			U = (1/2)V1(1+cos(phi)) + (1/2)V2(1-cos(2phi)) + (1/2)V3(1+cos(3phi)) + ....
	It then uses Boltzmann statistics along with the inverse temperature Beta to generate a PDF and CDF of the dihedral angle
	
	The output is two numpy arrays that represent the PDF and CDF associated with this potential energy function
	"""
	dx = 0.0001
	x = np.arange(0, 6.28, dx) # Generate discretized values for x (phi)
	U = np.full_like(x, 0.0) # Initialize potential energy array
	PDF = np.full_like(x, 0.0) # Initialize PDF array
	CDF_NN = np.full_like(x, 0.0) # Initialize non-normalized CDF array
	CDF = np.full_like(x, 0.0) # Initialize normalized CDF array
	norm = 0
	L = len(x.tolist()) 
	U = 0.5*(V[0]*(1 + np.cos(x)) + V[1]*(1 - np.cos(2*x)) + V[2]*(1 + np.cos(3*x)) + V[3]*(1 - np.cos(4*x)))
	PDF = np.exp(-U*Beta)
	
	for i in range(L-1):
		CDF_NN[i+1] = CDF_NN[i] + PDF[i]*dx
	
	for i in range(L):
		PDF[i] = PDF[i]/CDF_NN[-1]
		norm += PDF[i]*dx
	
	for i in range(L-1):
		CDF[i+1] = CDF[i] + PDF[i]*dx
		
	return PDF, CDF
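A minimal usage sketch, assuming the function above is in scope together with numpy as np; the coefficients and temperature are illustrative only:

V = np.array([1.3, -0.05, 0.2, 0.0])    # illustrative OPLS coefficients (kcal/mol)
Beta = 1.0 / (0.0019872 * 298.15)        # 1/(kB*T) in mol/kcal at 298 K
PDF, CDF = Gen_PDF_CDF_OPLS(V, Beta)
phi = np.arange(0, 6.28, 0.0001)         # same grid the function builds internally
samples = np.interp(np.random.rand(1000), CDF, phi)  # inverse-transform sampling of the dihedral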
Example #3
def Compare_Angle( Angle):
    dx = 0.0001
    M = [0, 2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
    N = len(Angle) - 1
    Th0 = Angle[0]*(3.1415/180.)
    dTh = 2.0
    Th = np.arange(Th0-dTh, Th0 + dTh, dx)
    U1 = np.full_like(Th, 0.0)
    U2 = np.full_like(Th, 0.0)
    
    for i in range(1, N+1):
        U1 += Angle[i]*(Th - Th0)**M[i-1]
    
    #U2 = Angle[2]*(Th - Th0)**2 + Angle[3]*(Th - Th0)**3 + Angle[4]*(Th-Th0)**4
    U2 = 1000.*(Th - Th0)**2
    plt.figure()
    plt.plot(Th, U1, label = 'Full Potential')
    plt.plot(Th, U2, label = 'Harmonic Approximation')
    plt.ylim((U1.min(), U2.max() ))
    plt.xlim((Th0-dTh, Th0 + dTh))
    plt.title('P1P1P2')
    plt.xlabel('Angle (Radians)', fontsize = 20)
    plt.ylabel('Potential Energy (Kcal/mol)', fontsize = 20)
    plt.legend()
    plt.show()

    return  
Example #4
def Compare_Bond( Bond , title):
    dx = 0.0001
    N = len(Bond) - 1
    DR = .25
    r = np.arange(Bond[0]-DR, Bond[0]+DR, dx)
    U1 = np.full_like(r, 0.0)
    U2 = np.full_like(r, 0.0)
    
    for i in range(1,N+1):
        U1 += Bond[i]*(r - Bond[0])**(i+1)
        print(i+1)
        
    U2 = Bond[1]*(r - Bond[0])**2 + Bond[2]*(r-Bond[0])**3 + Bond[3]*(r-Bond[0])**4
    
    plt.figure()
    plt.plot(r, U1, label = 'Full Potential')
    plt.plot(r, U2, label = 'Class2 Approximation')
    plt.xlim((Bond[0]-DR,Bond[0]+DR))
    plt.ylim((U1.min(), U1.max()))
    plt.title('%s' % title, fontsize = 30)
    plt.xlabel('Bond Length (Angstrom)', fontsize = 20)
    plt.ylabel('Potential Energy (Kcal/mol)', fontsize = 20)
    plt.legend()
    plt.show()
    return
Example #5
def assignReads(anchor, highestPeak, clusterSize, blockCount):
    global tagCount
    global clusterStart
    readMeans = np.empty(tagCount)
    readHeights = np.empty(tagCount)
    readMeans = np.full_like(readMeans, -1, dtype=np.double)
    readHeights = np.full_like(readHeights, -1, dtype=np.double)
    
    meanCounter = 0

    counterNew = 0
    counterOld = -1

    while counterOld != counterNew:
        dev = stddev(readMeans, readHeights, tagCount)
        counterOld = counterNew
        for start in anchor:
            if start.block == -1:
                mean = ((start.start + start.end) / 2) - clusterStart
                variance = args.sizescale * (abs(start.end - start.start) / 2)

                if (((mean - variance - dev) <= highestPeak and (mean + variance + dev) >= highestPeak) or (mean >= (highestPeak - args.merge) and mean <= (highestPeak + args.merge))):
                    readMeans[meanCounter] = mean
                    readHeights[meanCounter] = start.height
                    meanCounter += 1
                    start.block = blockCount
                    counterNew += 1

    return counterNew
Example #6
def calculate_risk(vector):
    init_dollars = 100          # change this if you want
    DOB = days_of_betting = 82  # change this if you want
    num_runs = 4096            # change this if you want
    risks = np.array(np.linspace(0.01,0.99,99)).T # change this too, but be careful
    risks = risks.reshape((risks.size, 1))
    prob, odds = vector[0], vector[1]
    dollars = np.full_like(risks, init_dollars)
    talley = np.full_like(risks, 0)
    multiplier = odds_to_pct(odds)
    if prob*multiplier - (1-prob) < 0:
        return 0
    else:
        for _ in range(num_runs):
            for day in range(DOB):
                outcome = np.random.random()
                if outcome < prob:
                    dollars += dollars * multiplier * risks
                else:
                    dollars -= dollars * risks
            if talley.all() == np.full_like(risks, 0).all():
                talley = dollars
            else:
                talley = np.c_[talley, dollars]
            dollars = np.full_like(risks, init_dollars)
        expectation = np.median(talley, axis=1)
        index = np.argmax(expectation)
        return risks[index][0]
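The snippet depends on an odds_to_pct helper that is not shown. A hypothetical definition and call, assuming American-style odds (the numbers are illustrative and the Monte Carlo loop is slow at the default num_runs):

def odds_to_pct(odds):
    # Hypothetical conversion of American odds to a payout fraction per unit staked.
    return 100.0 / abs(odds) if odds < 0 else odds / 100.0

best_fraction = calculate_risk((0.55, -110))  # 55% win probability at -110 odds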
Example #7
 def test_manual_bounds(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # get a test module
         train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
         train_y = torch.tensor([4.0], device=device, dtype=dtype)
         likelihood = GaussianLikelihood()
         model = ExactGP(train_x, train_y, likelihood)
         model.covar_module = RBFKernel(ard_num_dims=3)
         model.mean_module = ConstantMean()
         model.to(device=device, dtype=dtype)
         mll = ExactMarginalLogLikelihood(likelihood, model)
         # test the basic case
         x, pdict, bounds = module_to_array(
             module=mll, bounds={"model.covar_module.raw_lengthscale": (0.1, None)}
         )
         self.assertTrue(np.array_equal(x, np.zeros(5)))
         expected_sizes = {
             "likelihood.noise_covar.raw_noise": torch.Size([1]),
             "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
             "model.mean_module.constant": torch.Size([1]),
         }
         self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
         for pname, val in pdict.items():
             self.assertEqual(val.dtype, dtype)
             self.assertEqual(val.shape, expected_sizes[pname])
             self.assertEqual(val.device.type, device.type)
         lower_exp = np.full_like(x, 0.1)
         for p in ("likelihood.noise_covar.raw_noise", "model.mean_module.constant"):
             lower_exp[_get_index(pdict, p)] = -np.inf
         self.assertTrue(np.equal(bounds[0], lower_exp).all())
         self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
Example #8
def test_hpxgeom_coord_to_idx(nside, nested, coordsys, region, axes):
    import healpy as hp

    geom = HpxGeom(nside, nested, coordsys, region=region, axes=axes)
    lon = np.array([112.5, 135.0, 105.0])
    lat = np.array([75.3, 75.3, 74.6])
    coords = make_test_coords(geom, lon, lat)
    zidx = tuple([ax.coord_to_idx(t) for t, ax in zip(coords[2:], geom.axes)])

    if geom.nside.size > 1:
        nside = geom.nside[zidx]
    else:
        nside = geom.nside

    phi, theta = coords.phi, coords.theta
    idx = geom.coord_to_idx(coords)
    assert_allclose(hp.ang2pix(nside, theta, phi), idx[0])
    for i, z in enumerate(zidx):
        assert_allclose(z, idx[i + 1])

    # Test w/ coords outside the geometry
    lon = np.array([0.0, 5.0, 10.0])
    lat = np.array([75.3, 75.3, 74.6])
    coords = make_test_coords(geom, lon, lat)
    zidx = [ax.coord_to_idx(t) for t, ax in zip(coords[2:], geom.axes)]

    idx = geom.coord_to_idx(coords)
    if geom.region is not None:
        assert_allclose(np.full_like(coords[0], -1, dtype=int), idx[0])

    idx = geom.coord_to_idx(coords, clip=True)
    assert np.all(np.not_equal(np.full_like(coords[0], -1, dtype=int), idx[0]))
Example #9
def optimize_img(init_img, solver_type, solver_param, max_iter, display, root_dir, net,
                 all_target_blob_names, targets, target_data_list):
    ensuredir(root_dir)

    solver_param.update({
        'maxiter': max_iter,
        'disp': True,
    })

    # Set initial value and reshape net
    set_data(net, init_img)
    x0 = np.ravel(init_img).astype(np.float64)

    mins = np.full_like(x0, -128)
    maxs = np.full_like(x0, 128)

    bounds = list(zip(mins, maxs))
    display_func = DisplayFunctor(net, root_dir, display)

    opt_res = optimize.minimize(
        objective_func,
        x0,
        args=(net, all_target_blob_names, targets, target_data_list),
        bounds=bounds,
        method=solver_type,
        jac=True,
        callback=display_func,
        options=solver_param,
    )
    print(opt_res)
Example #10
def test_flux_unit_conversion():
    # By default the flux units should be set to Jy
    s = Spectrum1D(flux=np.array([26.0, 44.5]), spectral_axis=np.array([400, 500]) * u.nm)
    assert np.all(s.flux == np.array([26.0, 44.5]) * u.Jy)
    assert s.flux.unit == u.Jy

    # Simple Unit Conversion
    s = Spectrum1D(flux=np.array([26.0, 44.5]) * u.Jy, spectral_axis=np.array([400, 500])*u.nm)
    converted_value = s.to_flux(unit=u.uJy)[0]
    assert ((26.0 * u.Jy).to(u.uJy) == converted_value)

    # Make sure incompatible units raise UnitConversionError
    with pytest.raises(u.UnitConversionError):
        converted_value = s.to_flux(unit=u.m)

    # Pass custom equivalencies
    s = Spectrum1D(flux=np.array([26.0, 44.5]) * u.Jy, spectral_axis=np.array([400, 500]) * u.nm)
    eq = [[u.Jy, u.m,
          lambda x: np.full_like(np.array(x), 1000.0, dtype=np.double),
          lambda x: np.full_like(np.array(x), 0.001, dtype=np.double)]]
    converted_value = s.to_flux(unit=u.m, equivalencies=eq)[0]
    assert 1000.0 * u.m == converted_value

    # Check if suppressing the unit conversion works
    s = Spectrum1D(flux=np.array([26.0, 44.5]) * u.Jy, spectral_axis=np.array([400, 500]) * u.nm)
    s.to_flux("uJy", suppress_conversion=True)
    assert s.flux[0] == 26.0 * u.uJy
Example #11
def fill_array( var1, var2 ):
    """
    fix fill_array such that it returns two numpy arrays of equal size

    use numpy.full_like

    """
    var1_a = np.asarray( var1 )
    var2_a = np.asarray( var2 )

    if var1_a.shape==():
        var1_a = np.asarray( [var1] )
    if var2_a.shape==():
        var2_a = np.asarray( [var2] )

    # Begin try/except block to handle all cases for filling an array
    while True:
        try:
            assert var1_a.shape == var2_a.shape
            break
        except: pass
        try:
            var1_a = np.full_like( var2_a, var1_a )
            break
        except: pass
        try:
            var2_a = np.full_like( var1_a, var2_a )
            break
        except: pass

        # If none of the cases properly handle it, throw error
        assert False, 'var1 and var2 must both be equal shape or size=1'

    return var1_a, var2_a
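Illustrative calls, assuming numpy is imported as np; a scalar paired with an array is expanded with np.full_like to match the array's shape:

a, b = fill_array(3.0, np.linspace(0.0, 1.0, 5))
# a -> array([3., 3., 3., 3., 3.]), b -> the original 5-element array
x, y = fill_array([1, 2, 3], 7)
# x -> array([1, 2, 3]), y -> array([7, 7, 7])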
Example #12
def _extract_current_results(data, curr, data_time):
    grid = data['models']['simulationGrid']
    plate_spacing = _meters(grid['plate_spacing'])
    zmesh = np.linspace(0, plate_spacing, grid['num_z'] + 1) #holds the z-axis grid points in an array
    beam = data['models']['beam']
    if data.models.simulationGrid.simulation_mode == '3d':
        cathode_area = _meters(grid['channel_width']) * _meters(grid['channel_height'])
    else:
        cathode_area = _meters(grid['channel_width'])
    RD_ideal = sources.j_rd(beam['cathode_temperature'], beam['cathode_work_function']) * cathode_area
    JCL_ideal = sources.cl_limit(beam['cathode_work_function'], beam['anode_work_function'], beam['anode_voltage'], plate_spacing) * cathode_area

    if beam['currentMode'] == '2' or (beam['currentMode'] == '1' and beam['beam_current'] >= JCL_ideal):
        curr2 = np.full_like(zmesh, JCL_ideal)
        y2_title = 'Child-Langmuir cold limit'
    else:
        curr2 = np.full_like(zmesh, RD_ideal)
        y2_title = 'Richardson-Dushman'
    return {
        'title': 'Current for Time: {:.4e}s'.format(data_time),
        'x_range': [0, plate_spacing],
        'y_label': 'Current [A]',
        'x_label': 'Z [m]',
        'points': [
            curr.tolist(),
            curr2.tolist(),
        ],
        'x_points': zmesh.tolist(),
        'y_range': [min(np.min(curr), np.min(curr2)), max(np.max(curr), np.max(curr2))],
        'y1_title': 'Current',
        'y2_title': y2_title,
    }
Example #13
def watershed(image):
    hsv_image = color.rgb2hsv(image)

    low_res_image = rescale(hsv_image[:, :, 0], SCALE)
    local_mean = mean(low_res_image, disk(50))
    local_minimum_flat = np.argmin(local_mean)
    local_minimum = np.multiply(np.unravel_index(local_minimum_flat, low_res_image.shape), round(1 / SCALE))

    certain_bone_pixels = np.full_like(hsv_image[:, :, 0], False, bool)
    certain_bone_pixels[
        local_minimum[0] - INITIAL_WINDOW_SIZE // 2:local_minimum[0] + INITIAL_WINDOW_SIZE // 2,
        local_minimum[1] - INITIAL_WINDOW_SIZE // 2:local_minimum[1] + INITIAL_WINDOW_SIZE // 2
    ] = True

    certain_non_bone_pixels = np.full_like(hsv_image[:, :, 0], False, bool)
    certain_non_bone_pixels[0:BORDER_SIZE, :] = True
    certain_non_bone_pixels[-BORDER_SIZE:-1, :] = True
    certain_non_bone_pixels[:, 0:BORDER_SIZE] = True
    certain_non_bone_pixels[:, -BORDER_SIZE:-1] = True

    smoothed_hsv = median(hsv_image[:, :, 0], disk(50))
    threshold = MU * np.median(smoothed_hsv[certain_bone_pixels])

    possible_bones = np.zeros_like(hsv_image[:, :, 0])
    possible_bones[smoothed_hsv < threshold] = 1

    markers = np.zeros_like(possible_bones)
    markers[certain_bone_pixels] = 1
    markers[certain_non_bone_pixels] = 2

    labels = morphology.watershed(-possible_bones, markers)

    return labels
Example #14
def prob3():
    """Define the matrices A and B as arrays. Calculate the matrix product ABA,
    change its data type to np.int64, and return it.
    """
    A = np.triu(np.ones((7,7)))
    B = np.full_like(A, 5) - np.tril(np.full_like(A, 6))
    return np.dot(np.dot(A, B), A).astype(np.int64)
Example #15
    def resample(self, bin_count=120):
        start_i = int(self.t0 * self.sample_rate)
        end_i = util.clip(start_i + int(self.dt * self.sample_rate),
                          start_i, sys.maxsize)
        bin_size = (end_i - start_i) // bin_count
        if bin_size < 1:
            bin_size = 1
        bin_count = len(np.arange(start_i, end_i, bin_size))

        data = np.empty((self.data.shape[1], 2*bin_count, 4), dtype=np.float32)

        for i, column in enumerate(self.data):
            v = mea.min_max_bin(self.data[column].values[start_i:end_i],
                                bin_size, bin_count+1)
            col, row = mea.coordinates_for_electrode(column)
            row = 12 - row - 1
            x = np.full_like(v, col, dtype=np.float32)
            y = np.full_like(v, row, dtype=np.float32)
            t = np.arange(0, bin_count, 0.5, dtype=np.float32)
            data[i] = np.column_stack((x, y, t, v))

        # Update shader
        self.program['a_position'] = data.reshape(
            2*self.data.shape[1]*bin_count, 4)
        self.program['u_width'] = bin_count
Example #16
def march(x,u_e,nu):
    dx = numpy.diff(x)
    du_e = numpy.gradient(u_e,numpy.gradient(x))
    delta = numpy.full_like(x,0.)
    lam = numpy.full_like(x,lam0)

    # Initial conditions must be a stagnation point. If u_e[0]>0
    # assume stagnation is at x=0 and integrate from x=0..x[0].
    if u_e[0]<0.01:                     # stagnation point
        delta[0] = numpy.sqrt(lam0*nu/du_e[0])
    elif x[0]>0:                        # just downstream
        delta[0] = numpy.sqrt(lam0*nu*x[0]/u_e[0])
        delta[0] += 0.5*x[0]*g_pohl(delta[0],0,u_e,du_e,nu)
        lam[0] = delta[0]**2*du_e[0]/nu
    else:
        raise ValueError('x=0 must be stagnation point')

    # march!
    for i in range(len(x)-1):
        delta[i+1] = heun(g_pohl,delta[i],i,dx[i],
                          u_e,du_e,nu)  # ...additional arguments
        lam[i+1] = delta[i+1]**2*du_e[i+1]/nu

        if lam[i+1] < -12: i-=1; break  # separation condition

    return delta,lam,i+1                # return with separation index
Example #17
def plot_energy(run_summary, x_axis):
            
    plt.figure()
    
    if x_axis == "time":
        x_variable = run_summary.times / yr
        xlabel = "Time [yr]"
        xscale = "linear"
        plt.xscale(xscale)
        xfmt = plt.gca().get_xaxis().get_major_formatter() # needs to be set AFTER plt.xscale()
        if xscale == "log":
            mask = x_variable > 1
        elif xscale == "linear":
            mask = np.full_like(x_variable, True, dtype=bool) 
            xfmt.set_powerlimits((-2, 2)) # force scientific notation outside this range

    elif x_axis == "checkpoints":
        x_variable = np.arange(len(run_summary.times))
        xlabel = "Checkpoint"
        xscale = "linear"
        mask = np.full_like(x_variable, True, dtype=bool) 

        plt.xscale(xscale)
        xfmt = plt.gca().get_xaxis().get_major_formatter() # needs to be set AFTER plt.xscale()

    else:
        raise NotImplementedError("can't recognize x_axis value: " + x_axis)

    E_err = (run_summary.E_tot - run_summary.E_tot[0]) / run_summary.E_tot[0]
    plt.plot(x_variable[mask], E_err[mask])
    plt.xscale(xscale)
    plt.xlabel(xlabel)   
    plt.gca().xaxis.set_major_formatter(xfmt)
    plt.ylabel("Fractional Change (Energy)")
    SNe_distplot(run_summary, x_axis)

    plt.figure()
    plt.plot(x_variable[mask], run_summary.E_tot[mask], label="E_tot" )
    plt.plot(x_variable[mask], run_summary.E_kin[mask], label="E_kin" )
    plt.plot(x_variable[mask], run_summary.E_int[mask], label="E_int" )
    plt.legend(loc="best")
    plt.xscale(xscale)
    plt.xlabel(xlabel) 
    plt.gca().xaxis.set_major_formatter(xfmt)
    plt.ylabel("Energy [erg]")
    SNe_distplot(run_summary, x_axis)

    
    plt.figure()
    plt.plot(x_variable[mask], run_summary.E_R_tot[mask], label="E_Remnant" )
    plt.legend(loc="best")
    plt.xscale(xscale)
    plt.xlabel(xlabel)  
    plt.gca().xaxis.set_major_formatter(xfmt)
    plt.ylabel("Energy [erg]")
    SNe_distplot(run_summary, x_axis)

    if x_axis == "checkpoints":
        plt.xlim(xmin=0)
Example #18
    def plot(self):
        self.graph.clearPlot()
        self.validindices = numpy.empty((0,), dtype=int)
        self.current_selection = []
        group, target_indices = self.selected_split()
        self.warning([0, 1])
        self.error(1)

        if self.data and group is not None and target_indices:
            X = self.data.X
            I1 = grouputils.group_selection_mask(
                self.data, group, target_indices)
            I2 = ~I1
            if isinstance(group, grouputils.RowGroup):
                X = X.T

            N1, N2 = numpy.count_nonzero(I1), numpy.count_nonzero(I2)

            if not N1 or not N2:
                self.error(
                    1, "Target labels most exclude/include at least one value."
                )

            if N1 < 2 and N2 < 2:
                self.warning(
                    0, "Insufficient data to compute statistics. "
                       "More than one measurement per class should be provided"
                )

            X1, X2 = X[:, I1], X[:, I2]
            if numpy.any(X1 < 0.0) or numpy.any(X2 < 0):
                self.error(
                    "Negative values in the input. The inputs cannot be in "
                    "ratio scale."
                )
                X1 = numpy.full_like(X1, numpy.nan)
                X2 = numpy.full_like(X2, numpy.nan)

            with numpy.errstate(divide="ignore", invalid="ignore"):
                fold = numpy.log2(numpy.mean(X1, axis=1) /
                                  numpy.mean(X2, axis=1))
                # TODO: handle missing values better (mstats)
                _, P = scipy.stats.ttest_ind(X1, X2, axis=1, equal_var=True)
                logP = numpy.log10(P)
                if numpy.isscalar(logP):
                    # ttest_ind does not preserve output shape if either
                    # a or b is empty
                    logP = numpy.full(fold.shape, numpy.nan)

            mask = numpy.isfinite(fold) & numpy.isfinite(logP)
            self.validindices = numpy.flatnonzero(mask)
            self.graph.setPlotData(numpy.array([fold[mask], -logP[mask]]).T)

            self.infoLabel.setText("%i genes on input" % len(fold))
            # ("{displayed} displayed, {undef} with undefined ratio "
            #  "or t-statistics.")

            if not len(numpy.flatnonzero(mask)):
                self.warning(1, "Could not compute statistics for any genes!")
Example #19
def add_location_data(ds, lat, lon):
    lat = lat if lat else LAT_FILL
    lon = lon if lon else LON_FILL
    lat_array = np.full_like(ds.time.values, lat)
    lon_array = np.full_like(ds.time.values, lon)

    ds['lat'] = ('obs', lat_array, {'axis': 'Y', 'units': 'degrees_north', 'standard_name': 'latitude'})
    ds['lon'] = ('obs', lon_array, {'axis': 'X', 'units': 'degrees_east', 'standard_name': 'longitude'})
Example #20
    def distance_to(self, source):
        src_lats = num.full_like(self.lats, fill_value=source.lat)
        src_lons = num.full_like(self.lons, fill_value=source.lon)

        target_coords = self.get_latlon()
        target_lats = target_coords[:, 0]
        target_lons = target_coords[:, 1]
        return distance_accurate50m_numpy(
            src_lats, src_lons, target_lats, target_lons)
Example #21
    def test_basic(self):
        # Check derivative at endpoints
        n1_10 = np.arange(1, 10)
        dataset0 = np.column_stack([n1_10, np.full_like(n1_10, 0), np.full_like(n1_10, -1)])
        FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])

        n2_10 = np.arange(2, 10)
        dataset1 = np.column_stack([n2_10, np.full_like(n2_10, 1.0), np.full_like(n2_10, 0)])
        FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
Example #22
def march(x,u_e,du_e,nu):
    delta0 = numpy.sqrt(lam0*nu/du_e[0])                # set delta0
    delta = numpy.full_like(x,delta0)                   # delta array
    lam = numpy.full_like(x,lam0)                       # lambda array
    for i in range(len(x)-1):                           # march!
        delta[i+1] = heun(g_pohl,delta[i],i,x[i+1]-x[i],    # integrate BL using...
                          u_e,du_e,nu)                          # additional arguments
        lam[i+1] = delta[i+1]**2*du_e[i+1]/nu               # compute lambda
        if abs(lam[i+1])>12: break                          # check stop condition
    return delta,lam,i                                  # return with separation index
Example #23
def test_single_percentile_data():

    n = 1000
    x = np.arange(n, dtype=float)
    y = np.ones(n)

    s = scaling.lin_cdf_match(y, x)
    nptest.assert_almost_equal(s, np.full_like(s, np.nan))
    s = scaling.cdf_match(y, x)
    nptest.assert_almost_equal(s, np.full_like(s, np.nan))
Example #24
File: nphue.py Project: ak15199/rop
def h_to_rgb(h, sat=1, val=255.0):
    # Local variation of hsv_to_rgb that only cares about a variable
    # hue, with (s,v) assumed to be constant
    # h should be a numpy array with values between 0.0 and 1.0
    # hsv_to_rgb returns an array of uints between 0 and 255.
    s = np.full_like(h, sat)
    v = np.full_like(h, val)
    hsv = np.dstack((h, s, v))

    return hsv_to_rgb(hsv)
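A small usage sketch, assuming the surrounding module's hsv_to_rgb and numpy as np are available, with h a hue ramp in [0, 1]:

h = np.linspace(0.0, 1.0, 8)
rgb = h_to_rgb(h)  # np.dstack of 1-D arrays yields shape (1, 8, 3)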
Example #25
def test_jam_axi_vel():
    """
    Usage example for jam_axi_vel().
    It takes about 5s on a 2.5 GHz computer

    """
    np.random.seed(123)
    xbin, ybin = np.random.uniform(low=[-55, -40], high=[55, 40], size=[1000, 2]).T

    inc = 60.                                                # assumed galaxy inclination
    r = np.sqrt(xbin**2 + (ybin/np.cos(np.radians(inc)))**2) # Radius in the plane of the disk
    a = 40                                                   # Scale length in arcsec
    vr = 2000*np.sqrt(r)/(r+a)                               # Assumed velocity profile
    vel = vr * np.sin(np.radians(inc))*xbin/r                # Projected velocity field
    sig = 8700/(r+a)                                         # Assumed velocity dispersion profile
    rms = np.sqrt(vel**2 + sig**2)                           # Vrms field in km/s

    surf = np.array([39483., 37158., 30646., 17759., 5955.1, 1203.5, 174.36, 21.105, 2.3599, 0.25493])
    sigma = np.array([0.153, 0.515, 1.58, 4.22, 10, 22.4, 48.8, 105, 227, 525])
    qObs = np.full_like(sigma, 0.57)

    distance = 16.5   # Assume Virgo distance in Mpc (Mei et al. 2007)
    mbh = 1e8 # Black hole mass in solar masses
    beta = np.full_like(surf, 0.3)

    surf_lum = surf # Assume self-consistency
    sigma_lum = sigma
    qobs_lum = qObs
    surf_pot = surf
    sigma_pot = sigma
    qobs_pot = qObs

    sigmapsf = 0.6
    pixsize = 0.8
    goodbins = r > 10  # Arbitrarily exclude the center to illustrate how to use goodbins

    # First the M/L is determined by fitting the Vrms.
    # In general beta_z and the inclination will also be
    # fitted at this stage as described in Cappellari (2008)

    rmsModel, ml, chi2, flux = jam_axi_rms(
        surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,
        inc, mbh, distance, xbin, ybin, plot=True, rms=rms, goodbins=goodbins,
        sigmapsf=sigmapsf, beta=beta, pixsize=pixsize, tensor='zz')
    plt.pause(0.01)

    # The velocity is fitted at the best fitting M/L, beta_z and
    # inclination determined at the previous stage

    surf_pot *= ml  # Scale the density by the best fitting M/L
    velModel, kappa, chi2, flux = jam_axi_vel(
        surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,
        inc, mbh, distance, xbin, ybin, plot=True, vel=vel, goodbins=goodbins,
        sigmapsf=sigmapsf, beta=beta, pixsize=pixsize, component='z')
    plt.pause(0.01)
Example #26
def detection_efficiency(mes, mesthresh, version):
    """Gives the Kepler pipeline detection efficiency for a given
       MES.  In other words, what's the probability that a transit
       signal of a given MES is recovered by the Kepler pipeline.
       There are a variety of functional forms, use the version
       argument to select the one you want.
       INPUT:
         mes - Multiple event statistic
         mesthresh - Mes threshold.  Normally 7.1 unless TPS
                     times out at higher value
         version - Integer specifying functional form
             0 = Standard Theory prob=0.5 at MES=7.1 follows erf
             1 = Fressin et al. (2013) linear ramp 6<MES<16
             2 = Christiansen et al. (2015) gamma distribution
                   for Q1-Q16 pipeline run
       OUTPUT:
         prob - probability for detection [0.0-1.0]
    """
    # Each version has some hard coded parameters
    parm1s = [0.0, 6.0, 4.65]
    parm2s = [1.0, 16.0, 0.98] 
    # Get parameters for specified version
    p1 = parm1s[version]
    p2 = parm2s[version]
    # Do version 0, which is the erf form
    if version == 0:
        muoffset = p1
        sig = p2
        prob = np.full_like(mes, 1.0)
        abssnrdiff = np.abs(mes - mesthresh - muoffset);
        prob = np.where(abssnrdiff < 9.0, \
                        0.5 + (0.5*spec.erf( \
                        abssnrdiff / np.sqrt(2.0*sig**2))),\
                        prob)
        prob = np.where(mes < (mesthresh + muoffset), 1.0 - prob, prob)
    # Do version 1 which is linear ramp
    elif version == 1:
        mesmin = p1
        mesmax = p2
        slope = 1.0 / (mesmax - mesmin)
        prob = (mes - mesmin) * slope
        prob = np.where(prob < 0.0, 0.0, prob)
        prob = np.where(prob > 1.0, 1.0, prob)
    # Do version 2 which is gamma cdf
    elif version == 2:
        a = p1
        b = p2
        usemes = mes - 4.1 - (mesthresh - 7.1)
        usemes = np.where(usemes < 0.0, 0.0, usemes)
        gammawant = stat.gamma(a,loc=0.0,scale=b)
        prob = gammawant.cdf(usemes)
    else:
        prob = np.full_like(mes, 0.0)

    return prob
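An illustrative call, assuming numpy as np is imported (versions 0 and 2 additionally need scipy.special as spec and scipy.stats as stat, as in the original module):

mes = np.linspace(4.0, 20.0, 5)
print(detection_efficiency(mes, 7.1, 1))  # Fressin et al. (2013) linear ramp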
Example #27
def add_location(dic):
    first_key = list(dic.keys())[0]
    df0 = dic[first_key]
    df0['location'] = np.full_like(df0['datetime'].astype(str), first_key)
    for key in dic.keys():
        if key == first_key:
            pass
        else:
            df1 = dic[key]
            df1['location'] = np.full_like(df1.datetime.astype(str), key)
            df0 = pd.concat([df0, df1])
    return dic
Example #28
def firlp_lowpass1(numtaps, deltap, deltas, cutoff, width, fs, tol=None):
    # Edges of the transition band, expressed as radians per sample.
    wp = np.pi*(cutoff - 0.5*width)/(0.5*fs)
    ws = np.pi*(cutoff + 0.5*width)/(0.5*fs)
    # Grid density.
    density = 16*numtaps/np.pi
    # Number of grid points in the pass band.
    numfreqs_pass = int(np.ceil(wp*density))
    # Number of grid points in the stop band.
    numfreqs_stop = int(np.ceil((np.pi - ws)*density))

    # Grid of frequencies in the pass band.
    wpgrid = np.linspace(0, wp, numfreqs_pass)
    # Remove the first; the inequality associated with this frequency
    # will be replaced by an equality constraint.
    wpgrid = wpgrid[1:]
    # Grid of frequencies in the stop band.
    wsgrid = np.linspace(ws, np.pi, numfreqs_stop)

    # wgrid is the combined array of frequencies.
    wgrid = np.concatenate((wpgrid, wsgrid))

    # The array of weights in the linear programming problem.
    weights = np.concatenate((np.full_like(wpgrid, fill_value=1/deltap),
                              np.full_like(wsgrid, fill_value=1/deltas)))
    # The array of desired frequency responses.
    desired = np.concatenate((np.ones_like(wpgrid),
                              np.zeros_like(wsgrid)))

    R = (numtaps - 1)//2
    C = np.cos(wgrid[:, np.newaxis] * np.arange(R+1))
    V = 1/weights[:, np.newaxis]

    A = np.block([[C, -V], [-C, -V]])
    b = np.block([[desired, -desired]]).T
    c = np.zeros(R+2)
    c[-1] = 1

    # The equality constraint corresponding to H(0) = 1.
    A_eq = np.ones((1, R+2))
    A_eq[:, -1] = 0
    b_eq = np.array([1])

    print("numfreqs_pass ="******"  numfreqs_stop =", numfreqs_stop)

    print("R =", R)
    print("c.shape =", c.shape)
    print("A.shape =", A.shape)
    print("b.shape =", b.shape)
    print("A_eq.shape =", A_eq.shape)
    print("b_eq.shape =", b_eq.shape)

    taps_lp = solve_linprog(c, A, b, A_eq=A_eq, b_eq=b_eq, tol=tol)
    return taps_lp
Example #29
    def addAxisTicks(self, painter, cont, dirn, linecoords, tickprops,
                     ticklabelsprop, tickvals):
        """Add ticks for the vals and tick properties class given.
        linecoords: coordinates of start and end points of lines
        labelprops: properties of label, or None
        cont: container to add ticks
        dirn: 'x', 'y', 'z' for axis
        """

        ticklen = tickprops.length * 1e-3
        tfracs = self.dataToLogicalCoords(tickvals, scaling=False)

        outstart = []
        outend = []
        for op1, op2 in self.getAutoMirrorCombs():
            # where to draw tick from
            op1pts = N.full_like(tfracs, op1)
            op2pts = N.full_like(tfracs, op2)
            # where to draw tick to
            op1pts2 = N.full_like(tfracs, op1+ticklen*(1 if op1 < 0.5 else -1))
            op2pts2 = N.full_like(tfracs, op2+ticklen*(1 if op2 < 0.5 else -1))

            # swap coordinates depending on axis direction
            if dirn == 'x':
                ptsonaxis = (tfracs, op1pts, op2pts)
                ptsoff1 = (tfracs, op1pts2, op2pts)
                ptsoff2 = (tfracs, op1pts, op2pts2)
            elif dirn == 'y':
                ptsonaxis = (op1pts, tfracs, op2pts)
                ptsoff1 = (op1pts2, tfracs, op2pts)
                ptsoff2 = (op1pts, tfracs, op2pts2)
            else:
                ptsonaxis = (op1pts, op2pts, tfracs)
                ptsoff1 = (op1pts2, op2pts, tfracs)
                ptsoff2 = (op1pts, op2pts2, tfracs)

            outstart += [N.ravel(N.column_stack(ptsonaxis)),
                         N.ravel(N.column_stack(ptsonaxis))]
            outend += [N.ravel(N.column_stack(ptsoff1)),
                       N.ravel(N.column_stack(ptsoff2))]

        # add labels for ticks and axis label
        if ticklabelsprop is not None:
            self.addLabels(
                cont, linecoords, ticklabelsprop, tfracs, tickvals,
                self.settings.label, self.settings.Label)

        # add ticks themselves
        if not tickprops.hide:
            startpts = threed.ValVector(N.concatenate(outstart))
            endpts = threed.ValVector(N.concatenate(outend))
            lineprop = tickprops.makeLineProp(painter)
            cont.addObject(threed.LineSegments(startpts, endpts, lineprop))
Example #30
    def make_meshes(self):
        # X-axis
        xcenters = self.get_xdata()
        # Z-axis
        zvalues = self.get_zdata()
        print('z-values: {0}'.format(zvalues))

        if self.xerr_asym:
            xerr_lo, xerr_hi = self.get_xerr()
        else:
            xerr_lo, xerr_hi = self.get_xerr(), self.get_xerr()
        xlowedges, xupedges = xcenters - xerr_lo, xcenters + xerr_hi
        list_xgrid = [xlowedges[0]]
        list_zvalues = []
        list_zmask = []
        for ix, x in enumerate(xcenters):
            #if ix==0:
            #    list_xgrid.append(xlowedges[ix])
            if abs(xlowedges[ix]-list_xgrid[-1])<0.01:
                list_xgrid.append(xupedges[ix])
            elif xlowedges[ix]>list_xgrid[-1]:
                list_zvalues.append(np.full_like(list_zvalues[0], float(sys.maxsize)))
                list_zmask.append(np.full_like(zvalues[0], True, dtype=bool))
                list_xgrid.append(xlowedges[ix])
                list_xgrid.append(xupedges[ix])
            else:
                logging.critical('X bin edge (No.{0}) {1} is smaller than the previous one {2}!!!'.format(ix, xlowedges[ix], list_xgrid[-1]))
                logging.critical('Difference: {0}'.format(xlowedges[ix]-list_xgrid[-1]))
                logging.critical('Filled grids: {0}'.format(list_xgrid))
                logging.critical('Lower edges: {0}'.format(xlowedges))
                logging.critical('Upper edges: {0}'.format(xupedges))
                sys.exit(1)
            list_zvalues.append(zvalues[ix])
            list_zmask.append(np.full_like(zvalues[0], False, dtype=bool))
        xedges = np.array(list_xgrid)
        self.z_mesh = np.ma.array(list_zvalues, mask=list_zmask).T
        print('Z mesh: {0}'.format(self.z_mesh))

        # Y-axis
        for iys, ys in enumerate(self.lst_ydata):
            if any(ys!=self.lst_ydata[0]):
                logging.critical('Y bin edges do NOT match!!!')
                logging.critical('In the first time bin: {0}'.format(self.lst_ydata[0]))
                logging.critical('In the {0}th time bin: {1}'.format(iys, ys))
                sys.exit(1)
        yedges = self.lst_ydata[0]

        # Y-X mesh
        self.x_mesh, self.y_mesh = np.meshgrid(xedges, yedges)

        print('X mesh: {0}'.format(self.x_mesh.shape))
        print('Y mesh: {0}'.format(self.y_mesh.shape))
        print('Z mesh: {0}'.format(self.z_mesh.shape))
Example #31
    Returns
    -------
    None
    """

    try:
        dr = DataReader("test_data1.csv")

    except ValueError:
        pytest.fail("validate_csv_data failed to correctly determine that "
                    "the time and voltage arrays are of the same length.")


@pytest.mark.parametrize("test_array, expected_can_interp", [
    (np.append(np.zeros(100), np.full_like(
        np.zeros(1), np.nan, dtype=np.double)), True),
    (np.append(np.zeros(10), np.full_like(
        np.zeros(10), np.nan, dtype=np.double)), False),
    (np.append(np.zeros(9), np.full_like(np.zeros(1), np.nan,
                                         dtype=np.double)), True),
    (np.append(np.zeros(8), np.full_like(np.zeros(1), np.nan,
                                         dtype=np.double)), False),
])
def test_can_interp(dr, test_array, expected_can_interp):
    """Tests the can_interp function to determine if it returns true for
    arrays where more than 90% of values are defined, and returns false for
    arrays where less than 90% of values are defined

    Parameters
    ----------
    dr: DataReader
Example #32
def _calculate_signal(settings, t):

    # pylint: disable=too-many-branches

    x = t * settings.frequency + settings.phase / 360.0

    if settings.func == DwfAnalogOutFunction.DC:

        # All-zero, independent of the symmetry value.
        y = np.zeros_like(x)

    elif settings.func == DwfAnalogOutFunction.Sine:

        # The angle of which the sine is taken varies as a three-part piecewise linear function.
        angle = _waveform_triangle(settings.symmetry, x) * (0.5 * np.pi)

        y = np.sin(angle)

    elif settings.func == DwfAnalogOutFunction.Square:
        # Amplitude in range -1 .. 1
        q = np.clip(settings.symmetry / 100.0, 0.0, 1.0)

        if q == 0.0:
            y = np.full_like(x, -1.0)
        else:
            x = np.mod(x, 1)
            y = np.sign(q - x)

    elif settings.func == DwfAnalogOutFunction.Triangle:

        y = _waveform_triangle(settings.symmetry, x)

    elif settings.func == DwfAnalogOutFunction.RampUp:

        q = np.clip(settings.symmetry / 100.0, 0.0, 1.0)

        if q == 0.0:
            y = np.ones_like(x)
        else:
            x = np.mod(x, 1.0)
            y = (x - np.abs(x - q)) / q

    elif settings.func == DwfAnalogOutFunction.RampDown:

        q = np.clip(settings.symmetry / 100.0, 0.0, 1.0)

        if q == 1.0:
            y = np.ones_like(x)
        else:
            x = np.mod(x, 1.0)
            y = ((x - 1) + np.abs(x - q)) / (q - 1)

    elif settings.func == DwfAnalogOutFunction.Pulse:
        # Amplitude in range 0 .. 1
        y = 0.5 * (1.0 + _waveform_square(settings.symmetry, x))

    elif settings.func == DwfAnalogOutFunction.Trapezium:

        x = np.mod(x, 1.0)

        q = np.clip(0.25 * (settings.symmetry / 100.0), 0.000000001, 0.25)

        y = (-1 + 2*x - np.abs(q - x) + np.abs(q - x + 0.5) + np.abs(q + x - 1.0) - np.abs(q + x - 0.5)) / (2 * q)

    elif settings.func == DwfAnalogOutFunction.SinePower:

        plain_old_sine = np.sin(x * 2 * np.pi)

        # In the SinePower wave function, the 'symmetry' value is abused
        # to indicate an exponent between 1.0 and 0.0.

        exponent_setting = np.clip(settings.symmetry, -99.999999999, 100.000) / 100.0

        if exponent_setting >= 0:
            exponent = (1.0 - exponent_setting)
        else:
            exponent = 1.0 / (1.0 + exponent_setting)

        y = np.copysign(np.abs(plain_old_sine) ** exponent, plain_old_sine)

    return y
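The waveform code above relies on _waveform_triangle (and _waveform_square), which are defined elsewhere in the module. Purely as a hypothetical illustration of the idea, not the module's actual implementation, a symmetry-controlled triangle wave over one period might be sketched as:

def _waveform_triangle(symmetry, x):
    # Hypothetical sketch: rise for a fraction q of the period, fall for the rest.
    q = np.clip(symmetry / 100.0, 1e-9, 1.0 - 1e-9)
    x = np.mod(x, 1.0)
    return np.where(x < q, -1.0 + 2.0 * x / q, 1.0 - 2.0 * (x - q) / (1.0 - q))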
Example #33
def attenuation(frequency):
    return np.full_like(frequency, value)
Example #34
def constant_function(x, y, z):
    return np.full_like(x, percent_polarized)
Example #35
def SFMSimpleResp(S, channel, stimParams = []):
    '''
    # S is the cellStructure
    # channel is a dictionary with the parameters specifying the filter/model
    # returns object with simpleResp and other things

    # SFMSimpleResp       Computes response of simple cell for sfmix experiment

    # SFMSimpleResp(varargin) returns a simple cell response for the
    # mixture stimuli used in sfMix. The cell's receptive field is the n-th
    # derivative of a 2-D Gaussian that need not be circularly symmetric.

    # 1/23/17 - Edits: Added stimParams, make_own_stim so that I can set what
    # stimuli I want when simulating from model
    '''
    make_own_stim = 0;
    if stimParams: # i.e. if we actually have non-empty stimParams
        make_own_stim = 1;
        if not 'template' in stimParams:
            stimParams['template'] = S;
        if not 'repeats' in stimParams:
            stimParams['repeats'] = 10; # why 10? To match experimental #repetitions

    # Load the data structure
    T = S.get('sfm');

    # Get preferred stimulus values
    prefSf = channel.get('pref').get('sf');                              # in cycles per degree
    # CHECK LINE BELOW
    prefTf = round(numpy.nanmean(T.get('exp').get('trial').get('tf')[0]));     # in cycles per second

    # Get directional selectivity - removed 7/18/17

    # Get derivative order in space and time
    dOrdSp = channel.get('dord').get('sp');
    dOrdTi = channel.get('dord').get('ti');

    # Get aspect ratio in space - removed 7/18/17

    # Get spatial coordinates
    xCo = 0;                                                              # in visual degrees, centered on stimulus center
    yCo = 0;                                                              # in visual degrees, centered on stimulus center

    # Store some results in M
    M = dict();
    pref = dict();
    dord = dict();
    pref.setdefault('sf', prefSf);
    pref.setdefault('tf', prefTf);
    pref.setdefault('xCo', xCo);
    pref.setdefault('yCo', yCo);
    dord.setdefault('sp', dOrdSp);
    dord.setdefault('ti', dOrdTi);
    
    M.setdefault('pref', pref);
    M.setdefault('dord', dord);
    
    # Pre-allocate memory
    z             = T.get('exp').get('trial');
    nSf           = 1;
    nGratings     = 7;
    nFrames       = 120;
    if make_own_stim == 1:
        nTrials = stimParams.get('repeats'); # to keep consistent with number of repetitions used for each stim. condition
    else: # CHECK THIS GUY BELOW
        nTrials = len(z['num']);
    
    # set it zero
    M['simpleResp'] = numpy.zeros((nFrames, nTrials));

    # Compute simple cell response for all trials
    for p in range(nTrials): 
    
        # Set stim parameters
        if make_own_stim == 1:

            all_stim = makeStimulus(stimParams.get('stimFamily'), stimParams.get('conLevel'), \
                                                                    stimParams.get('sf_c'), stimParams.get('template'));

            stimOr = all_stim.get('Ori');
            stimTf = all_stim.get('Tf');
            stimCo = all_stim.get('Con');
            stimPh = all_stim.get('Ph');
            stimSf = all_stim.get('Sf');
        else:
            stimOr = numpy.empty((nGratings,));
            stimTf = numpy.empty((nGratings,));
            stimCo = numpy.empty((nGratings,));
            stimPh = numpy.empty((nGratings,));
            stimSf = numpy.empty((nGratings,));
            
            for iC in range(nGratings):
                stimOr[iC] = z.get('ori')[iC][p] * math.pi/180; # in radians
                stimTf[iC] = z.get('tf')[iC][p];          # in cycles per second
                stimCo[iC] = z.get('con')[iC][p];         # in Michelson contrast
                stimPh[iC] = z.get('ph')[iC][p] * math.pi/180;  # in radians
                stimSf[iC] = z.get('sf')[iC][p];          # in cycles per degree
                
        if numpy.count_nonzero(numpy.isnan(stimOr)): # then this is a blank stimulus, no computation to be done
            continue;
                
        # I. Orientation, spatial frequency and temporal frequency
        # Compute orientation tuning - removed 7/18/17

        # Compute spatial frequency tuning
        sfRel = stimSf / prefSf;
        s     = pow(stimSf, dOrdSp) * numpy.exp(-dOrdSp/2 * pow(sfRel, 2));
        sMax  = pow(prefSf, dOrdSp) * numpy.exp(-dOrdSp/2);
        sNl   = s/sMax;
        selSf = sNl;

        # Compute temporal frequency tuning
        tfRel = stimTf / prefTf;
        t     = pow(stimTf, dOrdTi) * numpy.exp(-dOrdTi/2 * pow(tfRel, 2));
        tMax  = pow(prefTf, dOrdTi) * numpy.exp(-dOrdTi/2);
        tNl   = t/tMax;
        selTf = tNl;

        # II. Phase, space and time
        omegaX = stimSf * numpy.cos(stimOr); # the stimulus in frequency space
        omegaY = stimSf * numpy.sin(stimOr);
        omegaT = stimTf;

        P = numpy.empty((nFrames, 3)); # nFrames for number of frames, two for x and y coordinate, one for time
        P[:,0] = 2*math.pi*xCo*numpy.ones(nFrames,);  # P is the matrix that contains the relative location of each filter in space-time (expressed in radians)
        P[:,1] = 2*math.pi*yCo*numpy.ones(nFrames,); # P(:,0) and p(:,1) describe location of the filters in space

        # Pre-allocate some variables
        if nSf == 1:
            respSimple = numpy.zeros(nFrames,);
        else:
            respSimple = numpy.zeros(nFrames, nSf);

        for iF in range(nSf):
            if isinstance(xCo, int):
                factor = 1;
            else:
                factor = len(xCo);

            linR1 = numpy.zeros((nFrames*factor, nGratings)); # pre-allocation
            linR2 = numpy.zeros((nFrames*factor, nGratings));
            linR3 = numpy.zeros((nFrames*factor, nGratings));
            linR4 = numpy.zeros((nFrames*factor, nGratings));
            
            computeSum = 0; # important constant: if stimulus contrast or filter sensitivity equals zero there is no point in computing the response

            for c in range(nGratings): # there are up to nine stimulus components
                selSi = selSf[c]*selTf[c]; # filter sensitivity for the sinusoid in the frequency domain

                if selSi != 0 and stimCo[c] != 0:
                    computeSum = 1;
                                   
                    # Use the effective number of frames displayed/stimulus duration
                    stimPos = numpy.asarray(range(nFrames))/float(nFrames) + \
                                            stimPh[c] / (2*math.pi*stimTf[c]); # 120 frames + the appropriate phase-offset
                    P3Temp  = numpy.full_like(P[:, 1], stimPos);
                    #P3Temp  = repmat(stimPos, 1, len(xCo));
                    P[:,2]  = 2*math.pi*P3Temp; # P(:,2) describes relative location of the filters in time.

                    omegas = numpy.vstack((omegaX[c], omegaY[c], omegaT[c])); # make this a 3 x len(omegaX) array
                    rComplex = selSi*stimCo[c]*numpy.exp(1j*numpy.dot(P, omegas));

                    linR1[:,c] = rComplex.real.reshape(linR1[:,c].shape);  # four filters placed in quadrature
                    linR2[:,c] = -1*rComplex.real.reshape(linR2[:,c].shape);
                    linR3[:,c] = rComplex.imag.reshape(linR3[:,c].shape);
                    linR4[:,c] = -1*rComplex.imag.reshape(linR4[:,c].shape);

                if computeSum == 1:
                    respSimple1 = numpy.maximum(0, linR1.sum(1)); # superposition and half-wave rectification,...
                    respSimple2 = numpy.maximum(0, linR2.sum(1));
                    respSimple3 = numpy.maximum(0, linR3.sum(1));
                    respSimple4 = numpy.maximum(0, linR4.sum(1));

                    # if channel is tuned, it is phase selective...
                    if nSf == 1:
                        if channel.get('dord').get('sp') != 0:
                            respSimple = respSimple1;
                        elif channel.get('dord').get('sp') == 0:
                            respComplex = pow(respSimple1, 2) + pow(respSimple2, 2) \
                                + pow(respSimple3, 2) + pow(respSimple4, 2); 
                            respSimple = numpy.sqrt(respComplex);                         
                    else:        
                        if channel.get('dord').get('sp') != 0:
                            respSimple[iF, :] = respSimple1;
                        elif channel.get('dord').get('sp') == 0:
                            respComplex = pow(respSimple1, 2) + pow(respSimple2, 2) \
                                + pow(respSimple3, 2) + pow(respSimple4, 2); 
                            respSimple[iF, :] = numpy.sqrt(respComplex);
                        
        #pdb.set_trace();
            
        # Store response in desired format
        M['simpleResp'][:,p] = respSimple;
        
    return M;
Example #36
def test_arithmetic():
    """Test evoked arithmetic."""
    ev = read_evokeds(fname, condition=0)
    ev20 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
    ev30 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=30)

    tol = dict(rtol=1e-9, atol=0)
    # test subtraction
    sub1 = combine_evoked([ev, ev], weights=[1, -1])
    sub2 = combine_evoked([ev, -ev], weights=[1, 1])
    assert np.allclose(sub1.data, np.zeros_like(sub1.data), atol=1e-20)
    assert np.allclose(sub2.data, np.zeros_like(sub2.data), atol=1e-20)
    # test nave weighting. Expect signal ampl.: 1*(20/50) + 1*(30/50) == 1
    # and expect nave == ev1.nave + ev2.nave
    ev = combine_evoked([ev20, ev30], weights='nave')
    assert np.allclose(ev.nave, ev20.nave + ev30.nave)
    assert np.allclose(ev.data, np.ones_like(ev.data), **tol)
    # test equal-weighted sum. Expect signal ampl. == 2
    # and expect nave == 1/sum(1/naves) == 1/(1/20 + 1/30) == 12
    ev = combine_evoked([ev20, ev30], weights=[1, 1])
    assert np.allclose(ev.nave, 12.)
    assert np.allclose(ev.data, ev20.data + ev30.data, **tol)
    # test equal-weighted average. Expect signal ampl. == 1
    # and expect nave == 1/sum(weights²/naves) == 1/(0.5²/20 + 0.5²/30) == 48
    ev = combine_evoked([ev20, ev30], weights='equal')
    assert np.allclose(ev.nave, 48.)
    assert np.allclose(ev.data, np.mean([ev20.data, ev30.data], axis=0), **tol)
    # test zero weights
    ev = combine_evoked([ev20, ev30], weights=[1, 0])
    assert ev.nave == ev20.nave
    assert np.allclose(ev.data, ev20.data, **tol)

    # default comment behavior if evoked.comment is None
    old_comment1 = ev20.comment
    ev20.comment = None
    ev = combine_evoked([ev20, -ev30], weights=[1, -1])
    assert_equal(ev.comment.count('unknown'), 2)
    assert ('-unknown' in ev.comment)
    assert (' + ' in ev.comment)
    ev20.comment = old_comment1

    with pytest.raises(ValueError, match="Invalid value for the 'weights'"):
        combine_evoked([ev20, ev30], weights='foo')
    with pytest.raises(ValueError, match='weights must be the same size as'):
        combine_evoked([ev20, ev30], weights=[1])

    # grand average
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    ch_names = evoked1.ch_names[2:]
    evoked1.info['bads'] = ['EEG 008']  # test interpolation
    evoked1.drop_channels(evoked1.ch_names[:1])
    evoked2.drop_channels(evoked2.ch_names[1:2])
    gave = grand_average([evoked1, evoked2])
    assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
    assert_equal(ch_names, gave.ch_names)
    assert_equal(gave.nave, 2)
    with pytest.raises(TypeError, match='All elements must be an instance of'):
        grand_average([1, evoked1])
    gave = grand_average([ev20, ev20, -ev30])  # (1 + 1 + -1) / 3  =  1/3
    assert_allclose(gave.data, np.full_like(gave.data, 1. / 3.))

    # test channel (re)ordering
    evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
    data2 = evoked2.data  # assumes everything is ordered to the first evoked
    data = (evoked1.data + evoked2.data) / 2.
    evoked2.reorder_channels(evoked2.ch_names[::-1])
    assert not np.allclose(data2, evoked2.data)
    with pytest.warns(RuntimeWarning, match='reordering'):
        evoked3 = combine_evoked([evoked1, evoked2], weights=[0.5, 0.5])
    assert np.allclose(evoked3.data, data)
    assert evoked1.ch_names != evoked2.ch_names
    assert evoked1.ch_names == evoked3.ch_names
Example #37
import numpy as np
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('/Users/nami/Documents/tester/boxies/image_062.png')
# img = img[...,::-1]
matte = cv2.imread(
    '/Users/nami/Documents/background_removal_test/indexnet_matting/examples/mattes/image_062.png'
)
h, w, _ = img.shape
bg = np.full_like(img, 255)  #white background

img = img.astype(float)
bg = bg.astype(float)

matte = matte.astype(float) / 255.0
img = cv2.multiply(img, matte)
bg = cv2.multiply(bg, 1.0 - matte)
outImage = cv2.add(img, bg)
# plt.subplot(1,2,1)
# plt.imshow(img)
# plt.subplot(1,2,2)
# plt.imshow(outImage/255)
cv2.imwrite(
    '/Users/nami/Documents/background_removal_test/indexnet_matting/examples/images/63.png',
    outImage)
# cv2.imshow('image', outImage/255)

# cv2.waitKey(0)
# cv2.destroyAllWindows()
# plt.show()
Example #38
def _decompress(data, mask, dtype):
    data_blank = np.full_like(mask, np.nan, dtype=dtype)
    data_blank[mask] = data
    data_blank.shape = mask.shape
    return data_blank
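A quick round trip, assuming numpy is imported as np: the compressed values are scattered back into the True positions of the mask, with NaN everywhere else.

mask = np.array([[True, False], [False, True]])
full = _decompress(np.array([1.5, 2.5]), mask, np.float64)
# full -> [[1.5, nan], [nan, 2.5]]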
Example #39
plt.figure(figsize=(10, 7))
G = gridspec.GridSpec(2, 3)
ax1 = plt.subplot(G[0, :])
ax2 = plt.subplot(G[1, 0])
ax3 = plt.subplot(G[1, 1])
ax4 = plt.subplot(G[1, 2])

# Reachability plot
colors = ['g.', 'r.', 'b.', 'y.', 'c.']
for klass, color in zip(range(0, 5), colors):
    Xk = space[labels == klass]
    Rk = reachability[labels == klass]
    ax1.plot(Xk, Rk, color, alpha=0.3)
ax1.plot(space[labels == -1], reachability[labels == -1], 'k.', alpha=0.3)
ax1.plot(space, np.full_like(space, 2., dtype=float), 'k-', alpha=0.5)
ax1.plot(space, np.full_like(space, 0.5, dtype=float), 'k-.', alpha=0.5)
ax1.set_ylabel('Reachability (epsilon distance)')
ax1.set_title('Reachability Plot')

# OPTICS
colors = ['g.', 'r.', 'b.', 'y.', 'c.']
for klass, color in zip(range(0, 5), colors):
    Xk = X[clust.labels_ == klass]
    ax2.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3)
ax2.plot(X[clust.labels_ == -1, 0], X[clust.labels_ == -1, 1], 'k+', alpha=0.1)
ax2.set_title('Automatic Clustering\nOPTICS')

# DBSCAN at 0.5
colors = ['g', 'greenyellow', 'olive', 'r', 'b', 'c']
for klass, color in zip(range(0, 6), colors):
Example #40
        # Calculate the posterior probabilities of the point being from distr a or b
        bi = (likelihood(x, mu_b, sig_b) * pb) / (
            likelihood(x, mu_b, sig_b) * pb + likelihood(x, mu_a, sig_a) * pa)
        ai = 1 - bi

        #print(i, x, bi, ai)

        posteriors_b.append(bi)
        posteriors_a.append(ai)

    # Calculate new estimates for the parameters using the posteriors as weights in a weighted average

    new_mu_a = np.average(n, weights=posteriors_a)
    new_mu_b = np.average(n, weights=posteriors_b)

    new_sig_a = np.average((n - np.full_like(n, mu_a))**2,
                           weights=posteriors_a)
    new_sig_b = np.average((n - np.full_like(n, mu_b))**2,
                           weights=posteriors_b)
    new_sig_a = np.sqrt(new_sig_a)
    new_sig_b = np.sqrt(new_sig_b)

    # Upload parameter lists with new parameters
    guesses_mu_a.append(new_mu_a)
    guesses_mu_b.append(new_mu_b)
    guesses_sig_a.append(new_sig_a)
    guesses_sig_b.append(new_sig_b)

    print('%.2f %.2f' % (new_mu_a, new_mu_b))
    print('%.2f %.2f' % (new_sig_a, new_sig_b))
Example #41
    def updateInfoAndCounters(cobra, xNew, yNewEval, conNewEval, phase):

        cobra['Fsteepness'] = [0] * cobra['nObj']
        cobra['A'] = np.vstack((cobra['A'], xNew))
        cobra['lastX'] = xNew
        cobra['Fres'] = np.vstack((cobra['Fres'], yNewEval))
        cobra['Gres'] = np.vstack((cobra['Gres'], conNewEval))

        FresStandardized = np.full_like(cobra['Fres'], 0)
        FresStandardizedMean = np.zeros(cobra['nObj'])
        FresStandardizedStd = np.zeros(cobra['nObj'])

        FresPlogStandardized = np.full_like(cobra['Fres'], 0)
        FresPlogStandardizedMean = np.zeros(cobra['nObj'])
        FresPlogStandardizedStd = np.zeros(cobra['nObj'])

        for obji in range(cobra['nObj']):
            res, mean, std = standardize_obj(cobra['Fres'][:, obji])
            FresStandardized[:, obji] = res
            FresStandardizedMean[obji] = mean
            FresStandardizedStd[obji] = std

            plogFres = plog(cobra['Fres'][:, obji])
            res, mean, std = standardize_obj(plogFres)
            FresPlogStandardized[:, obji] = res
            FresPlogStandardizedMean[obji] = mean
            FresPlogStandardizedStd[obji] = std

        cobra['FresStandardized'] = FresStandardized
        cobra['FresStandardizedMean'] = FresStandardizedMean
        cobra['FresStandardizedStd'] = FresStandardizedStd
        cobra['lastF'] = FresStandardized[-1]

        cobra['FresPlogStandardized'] = FresPlogStandardized
        cobra['FresPlogStandardizedMean'] = FresPlogStandardizedMean
        cobra['FresPlogStandardizedStd'] = FresPlogStandardizedStd

        GresRescaled = np.full_like(cobra['Gres'], 0)
        GresRescaledDivider = np.zeros(cobra['nConstraints'])
        GresPlogRescaled = np.full_like(cobra['Gres'], 0)
        GresPlogRescaledDivider = np.zeros(cobra['nConstraints'])
        for coni in range(cobra['nConstraints']):
            GresRescaled[:, coni], GresRescaledDivider[coni] = rescale_constr(
                cobra['Gres'][:, coni])
            plogGres = plog(cobra['Gres'][:, coni])
            GresPlogRescaled[:, coni], GresPlogRescaledDivider[
                coni] = rescale_constr(plogGres)

        cobra['GresRescaled'] = GresRescaled
        cobra['GresRescaledDivider'] = GresRescaledDivider

        cobra['GresPlogRescaled'] = GresPlogRescaled
        cobra['GresPlogRescaledDivider'] = GresPlogRescaledDivider

        pff = paretofrontFeasible(cobra['Fres'], cobra['Gres'])
        pf = cobra['Fres'][pff]
        cobra['paretoFrontier'] = pf
        cobra['paretoFrontierFeasible'] = pff

        hv = hypervolume(pf, cobra['ref'])
        cobra['currentHV'] = hv

        newNumViol = np.sum(conNewEval > 0)
        newMaxViol = max(0, max(conNewEval))

        if newNumViol == 0:
            cobra['hypervolumeProgress'] = np.append(
                cobra['hypervolumeProgress'], hv)
        else:
            cobra['hypervolumeProgress'] = np.append(
                cobra['hypervolumeProgress'], cobra['hypervolumeProgress'][-1])

        cobra['numViol'] = np.append(cobra['numViol'], newNumViol)
        cobra['maxViol'] = np.append(cobra['maxViol'], newMaxViol)
        cobra['phase'].append(phase)

        for ci in range(cobra['nConstraints']):
            if conNewEval[ci] <= 0:
                cobra['EPS'][ci] = cobra['EPS'][ci] * 0.9
            else:
                cobra['EPS'][ci] = np.minimum(1.1 * cobra['EPS'][ci],
                                              cobra['epsilonMax'][ci])

        return (cobra)
Ejemplo n.º 42
0
mp_face = mp.solutions.face_mesh
mesh = mp_face.FaceMesh()

# OpenCV loads images as BGR by default; MediaPipe expects RGB

img = cv2.imread(
    "train/testface.jpg"
)  # just change these two lines to read your own images and run the script. If you don't want the blurred (Gaussian) output, go to line 180
img2 = cv2.imread("train/otherface.jpg")

if img.shape != img2.shape:
    print("img1 shape: ", img.shape)
    print("img2 shape:", img2.shape)
    raise ValueError("images must be same shape!")

mask = np.full_like(img, 255)

height, weight, channel = img.shape

img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_gaus = cv2.GaussianBlur(img_rgb, (7, 7), 60)

result = mesh.process(img_rgb)
indexes_triangles = []

# img2=cv2.resize(img2,(500,500))
uz, gen, kan = img2.shape
img_rgb2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)  # img2 was read with cv2.imread, so it is BGR
result2 = mesh.process(img_rgb2)
Ejemplo n.º 43
0
def plot_graphical_explanation(saxdt_model,
                               x,
                               rule,
                               title,
                               legend_label,
                               figsize,
                               dpi,
                               fontsize,
                               text_height,
                               labelfontsize,
                               loc,
                               frameon,
                               is_factual_for_counterexemplar,
                               contained_subsequences,
                               fixed_contained_subsequences=True,
                               forced_y_lim=None,
                               return_y_lim=False,
                               draw_on=None,
                               print_word=True,
                               enhance_not_contained=False,
                               no_axes_labels=False):
    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    ax.set_title(title, fontsize=fontsize)
    ax.plot(x.ravel().T if draw_on is None else draw_on.ravel().T,
            c="royalblue",
            alpha=0.2,
            lw=3,
            label=legend_label)
    #ax.plot(x.ravel().T if draw_on is None else draw_on.T, c="royalblue", alpha=0.2, lw=3, label=legend_label)

    for i, idx_word in enumerate(rule["features"][:-1]):
        feature = saxdt_model.name_dictionary[idx_word]
        threshold_sign = rule["thresholds_signs"][i]
        dummy_ts = np.full_like(x.ravel(), np.nan)
        if idx_word in contained_subsequences:
            if fixed_contained_subsequences:
                subsequence = contained_subsequences[idx_word][0]
            else:
                if is_factual_for_counterexemplar and (len(
                        contained_subsequences[idx_word]) == 2):
                    subsequence = contained_subsequences[idx_word][1]
                else:
                    subsequence = contained_subsequences[idx_word][0]
        else:
            if enhance_not_contained:
                maximum = 0
                subseq = None
                for subsequence in saxdt_model.subsequence_dictionary[
                        idx_word][:, :, 0]:
                    dist = sliding_window_euclidean(x.ravel(), subsequence)
                    if dist > maximum:
                        maximum = dist
                        subseq = subsequence
                subsequence = subseq
            else:
                subsequence = compute_medoid(
                    saxdt_model.subsequence_dictionary[idx_word][:, :, 0])
        best_alignment_start_idx = sliding_window_distance(
            x.ravel(), subsequence)
        best_alignment_end_idx = best_alignment_start_idx + len(subsequence)
        start_idx = best_alignment_start_idx
        end_idx = best_alignment_end_idx
        if end_idx == len(x.ravel()):
            end_idx -= 1
            subsequence = subsequence[:-1]
        dummy_ts[start_idx:end_idx] = subsequence
        if threshold_sign == "contained":
            ax.plot(dummy_ts, c="#2ca02c", alpha=0.5, lw=5, label="contained")
            plt.text(
                (start_idx + end_idx) / 2,
                # np.nanmin(dummy_ts) + text_height + ((np.nanmin(dummy_ts) + np.nanmax(dummy_ts))/2),
                text_height + np.mean(subsequence),
                str(idx_word) if not print_word else str(idx_word) + " (" +
                feature.decode("utf-8") + ")",
                fontsize=fontsize - 2,
                c="#2ca02c",
                horizontalalignment='center',
                verticalalignment='center',
                weight='bold',
                path_effects=[
                    patheffects.Stroke(linewidth=3,
                                       foreground='white',
                                       alpha=0.6),
                    patheffects.Normal()
                ])
        else:
            ax.plot(dummy_ts,
                    c="#d62728",
                    alpha=0.5,
                    lw=5,
                    linestyle="--",
                    label="not-contained")
            plt.text(
                (best_alignment_start_idx + best_alignment_end_idx) / 2,
                # np.nanmin(dummy_ts) + text_height + ((np.nanmin(dummy_ts) + np.nanmax(dummy_ts))/2),
                text_height + np.mean(subsequence),
                str(idx_word) if not print_word else str(idx_word) + " (" +
                feature.decode("utf-8") + ")",
                fontsize=fontsize - 2,
                c="#d62728",
                horizontalalignment='center',
                verticalalignment='center',
                weight='bold',
                path_effects=[
                    patheffects.Stroke(linewidth=3,
                                       foreground='white',
                                       alpha=0.6),
                    patheffects.Normal()
                ])
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = dict(zip(labels, handles))
    plt.tick_params(axis='both', which='major', labelsize=fontsize)
    if not no_axes_labels:
        plt.xlabel("time-steps", fontsize=fontsize)
        plt.ylabel("value", fontsize=fontsize)
    plt.legend(by_label.values(),
               by_label.keys(),
               frameon=frameon,
               fontsize=labelfontsize,
               loc=loc)
    if forced_y_lim is not None:
        plt.gca().set_ylim(forced_y_lim)
    if return_y_lim:
        y_lim = plt.gca().get_ylim()
    plt.show()
    if return_y_lim:
        return y_lim
Ejemplo n.º 44
0
def bag_of_words_sum(
    input_text, compression, number_of_sentence
):  # input_text: input text, compression: compression ratio (%), number_of_sentence: character limit
    # First, build the list of technical terms
    """
    wordlist = []
    path = "dataset.txt"
    with open(path) as f:
        for s_line in f:
            #print(s_line)
            s_line = s_line.strip("\n")
            wordlist.append(s_line)
    """
    #input_text=("経路制御技術(ルーティング)とは、ネットワーク層が行う処理で目的のパケットの宛先のIPアドレスまでどのようにIPを経由して送られるかについて制御を行う技術である。\nインターネットにおける経路制御技術は、直接転送と間接転送がある。直接転送では、同一のネットワーク上のホストに転送を行い、ルータを経由する必要がない。一方間接転送は、異なるネットワーク上のホストへの転送を行う。\n経路選択には、事前に経路を決定する静的経路制御とルータ間でルーティングプロトコルを使用し、経路表を作成する動的経路制御の2種類がある。静的経路制御では、経路表が事前に作られているため、経路が安定する。しかし経路数が多くなった場合は設定が複雑になることやルータの障害の際に再設定が必要である。一方、動的経路制御はルータが経路を計算するため、経路計算の負荷や冗長性のある経路選択が行われるなどのデメリットがあるが、経路表を自動で作るため、障害迂回の際の経路変更が自動で行われるなどのメリットがある。今回の課題では、動的経路制御の一種であるOSPFのプロトコルについて調査を行う。\n")
    input_text_word = input_text
    input_text = input_text.strip("\n")
    input_text = input_text.split('。')
    input_text_copy = input_text
    # Morphological analysis (requires: pip3 install janome)
    tokenslist = []
    tokenslist_dic = []
    for i in range(len(input_text) - 1):
        tokenslist1 = []
        t = Tokenizer()  # create the tokenizer
        tokens = t.tokenize(input_text[i])  # morphological analysis; each token is stored in tokens[]
        for token in tokens:
            tokenslist1.append(token.surface)
            tokenslist_dic.append(token.surface)
        tokenslist.append(tokenslist1)

    #print(tokens[0].surface) # display the result (.surface gives the token text)
    #print(tokenslist)

    # Build the vocabulary
    tokenslist_dic = list(set(tokenslist_dic))  # remove duplicates
    vec = np.zeros((len(tokenslist), len(tokenslist_dic)))  # allocate the array

    # Build the bag of words
    for i in range(len(tokenslist)):
        for j in range(len(tokenslist_dic)):
            vec[i][j] = tokenslist[i].count(tokenslist_dic[j])

    #print(vec)
    #print(cosinSimilarity(vec[0],vec[0]))

    graph = np.zeros((len(tokenslist), len(tokenslist)), dtype="float32")

    for i in range(len(tokenslist)):
        for j in range(len(tokenslist)):
            graph[i][j] = cosinSimilarity(vec[i], vec[j])

    #print(graph)  # matrix of pairwise cosine similarities

    # Build the adjacency matrix
    para = 0.3  # comparison threshold
    compare = np.full_like(graph, para)  # array used for the comparison
    adjacency = graph > compare  # apply the threshold

    # Build the transition (probability) matrix
    rundom_graph = np.zeros_like(graph)
    for i in range(len(tokenslist)):
        sum_one = np.sum(adjacency[i])
        for j in range(len(tokenslist)):
            rundom_graph[i][j] = adjacency[i][j] / sum_one  # normalize so each row sums to 1

    ratings = power_method(rundom_graph, len(tokenslist), 0.01)

    #print((ratings))  # shows how important each sentence is

    # Build the output text
    compression = compression / 100  # compression ratio (%)
    #number_of_sentence = len(tokenslist)  # number of characters

    sort_index = np.argsort(ratings)  # indices that would sort the ratings
    output = []
    output_index = []
    for i in range(int(len(tokenslist) / 2)):
        output_index.append(sort_index[i])

    output_index = np.array(output_index)
    output_index = np.sort(output_index)  # sort the sentence indices

    for i in range(len(output_index)):
        output.append(input_text_copy[output_index[i]])

    # Convert the list back into a single text
    output_text = ""
    for i in range(len(output)):
        output_text = output_text + output[i] + "。"
    # Extract the important words
    wordlist = Extract_ImportantWords(output_text)
    return output_text, wordlist
Ejemplo n.º 45
0
    def fit(self, X, y):
        """
        Fits this ``SelfTrainingClassifier`` to a dataset.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            array representing the data

        y : array-like, shape = (n_samples,)
            array representing the labels. Unlabeled samples should have the
            label -1.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        # we need row slicing support for sparse matrices
        X, y = self._validate_data(X,
                                   y,
                                   accept_sparse=['csr', 'csc', 'lil', 'dok'])

        if self.base_estimator is None:
            raise ValueError("base_estimator cannot be None!")

        self.base_estimator_ = clone(self.base_estimator)

        if self.max_iter is not None and self.max_iter < 0:
            raise ValueError("max_iter must be >= 0 or None,"
                             f" got {self.max_iter}")

        if not (0 <= self.threshold < 1):
            raise ValueError("threshold must be in [0,1),"
                             f" got {self.threshold}")

        if self.criterion not in ['threshold', 'k_best']:
            raise ValueError(f"criterion must be either 'threshold' "
                             f"or 'k_best', got {self.criterion}.")

        if y.dtype.kind in ['U', 'S']:
            raise ValueError("y has dtype string. If you wish to predict on "
                             "string targets, use dtype object, and use -1"
                             " as the label for unlabeled samples.")

        has_label = y != -1

        if np.all(has_label):
            warnings.warn("y contains no unlabeled samples", UserWarning)

        if self.criterion == 'k_best' and (self.k_best >
                                           X.shape[0] - np.sum(has_label)):
            warnings.warn(
                "k_best is larger than the amount of unlabeled "
                "samples. All unlabeled samples will be labeled in "
                "the first iteration", UserWarning)

        self.transduction_ = np.copy(y)
        self.labeled_iter_ = np.full_like(y, -1)
        self.labeled_iter_[has_label] = 0

        self.n_iter_ = 0

        while not np.all(has_label) and (self.max_iter is None
                                         or self.n_iter_ < self.max_iter):
            self.n_iter_ += 1
            self.base_estimator_.fit(X[safe_mask(X, has_label)],
                                     self.transduction_[has_label])

            if self.n_iter_ == 1:
                # Only validate in the first iteration so that n_iter=0 is
                # equivalent to the base_estimator itself.
                _validate_estimator(self.base_estimator)

            # Predict on the unlabeled samples
            prob = self.base_estimator_.predict_proba(X[safe_mask(
                X, ~has_label)])
            pred = self.base_estimator_.classes_[np.argmax(prob, axis=1)]
            max_proba = np.max(prob, axis=1)

            # Select new labeled samples
            if self.criterion == 'threshold':
                selected = max_proba > self.threshold
            else:
                n_to_select = min(self.k_best, max_proba.shape[0])
                if n_to_select == max_proba.shape[0]:
                    selected = np.ones_like(max_proba, dtype=bool)
                else:
                    # NB these are indices, not a mask
                    selected = \
                        np.argpartition(-max_proba, n_to_select)[:n_to_select]

            # Map selected indices into original array
            selected_full = np.nonzero(~has_label)[0][selected]

            # Add newly labeled confident predictions to the dataset
            self.transduction_[selected_full] = pred[selected]
            has_label[selected_full] = True
            self.labeled_iter_[selected_full] = self.n_iter_

            if selected_full.shape[0] == 0:
                # no changed labels
                self.termination_condition_ = "no_change"
                break

            if self.verbose:
                print(f"End of iteration {self.n_iter_},"
                      f" added {selected_full.shape[0]} new labels.")

        if self.n_iter_ == self.max_iter:
            self.termination_condition_ = "max_iter"
        if np.all(has_label):
            self.termination_condition_ = "all_labeled"

        self.base_estimator_.fit(X[safe_mask(X, has_label)],
                                 self.transduction_[has_label])
        self.classes_ = self.base_estimator_.classes_
        return self
Ejemplo n.º 46
0
    # plt.show()
    # plt.close()

    # pmra/pmdec density estimation plot
    pmra_vals = np.linspace(-12, 9, 350)
    pmdec_vals = np.linspace(-14, 4, 350)
    xx, yy = np.meshgrid(pmra_vals, pmdec_vals)
    pos = np.empty(xx.shape + (2,))
    pos[:, :, 0] = xx
    pos[:, :, 1] = yy

    file_density = 'density_{:.2f}_{:.2f}.pkl'.format(mean_parallax, d_parallax)
    if path.isfile(file_density):
        total_density = joblib.load(file_density)
    else:
        total_density = np.full_like(xx, fill_value=0.)
        for i_s, star in enumerate(gaia_data):
            if i_s % 500 == 0:
                print i_s
            cov = np.array([[star['pmra_error'], 0], [0, star['pmdec_error']]])
            mean = np.array([star['pmra'], star['pmdec']])
            total_density += multivariate_normal.pdf(pos, mean=mean, cov=cov)
        joblib.dump(total_density, file_density)

    plt.imshow(total_density, interpolation='none', origin='lower', cmap='viridis',
               extent=[-12, 9, -14, 4],
               vmin=0,  # np.percentile(total_density, 0),
               vmax=1300,  # np.percentile(total_density, 100)
               )
    plt.colorbar()
    plt.contour(xx, yy, total_density, [0, 50, 200, 350, 500, 650, 800, 950, 1100, 1250, 1400, 1550, 1700],
Ejemplo n.º 47
0
def compute_shape(segmentation,
                  angle_correction=True,
                  param_centerline=None,
                  verbose=1):
    """
    Compute morphometric measures of the spinal cord in the transverse (axial) plane from the segmentation.
    The segmentation could be binary or weighted for partial volume [0,1].
    :param segmentation: input segmentation. Could be either an Image or a file name.
    :param angle_correction:
    :param param_centerline: see centerline.core.ParamCenterline()
    :param verbose:
    :return metrics: Dict of class Metric(). If a metric cannot be calculated, its value will be nan.
    :return fit_results: class centerline.core.FitResults()
    """
    # List of properties to output (in the right order)
    property_list = [
        'area',
        'angle_AP',
        'angle_RL',
        'diameter_AP',
        'diameter_RL',
        'eccentricity',
        'orientation',
        'solidity',
    ]

    im_seg = Image(segmentation).change_orientation('RPI')

    # Getting image dimensions. x, y and z respectively correspond to RL, PA and IS.
    nx, ny, nz, nt, px, py, pz, pt = im_seg.dim

    # Extract min and max index in Z direction
    data_seg = im_seg.data
    X, Y, Z = (data_seg > 0).nonzero()
    min_z_index, max_z_index = min(Z), max(Z)

    # Initialize dictionary of property_list, with 1d array of nan (default value if no property for a given slice).
    shape_properties = {
        key: np.full_like(np.empty(nz), np.nan, dtype=np.double)
        for key in property_list
    }

    if angle_correction:
        # compute the spinal cord centerline based on the spinal cord segmentation
        # here, param_centerline.minmax needs to be False because we need to retrieve the total number of input slices
        _, arr_ctl, arr_ctl_der, fit_results = get_centerline(
            im_seg, param=param_centerline, verbose=verbose)
    else:
        fit_results = None

    # Loop across z and compute shape analysis
    for iz in tqdm(range(min_z_index, max_z_index + 1),
                   unit='iter',
                   unit_scale=False,
                   desc="Compute shape analysis",
                   ascii=True,
                   ncols=80):
        # Extract 2D patch
        current_patch = im_seg.data[:, :, iz]
        if angle_correction:
            # Extract tangent vector to the centerline (i.e. its derivative)
            tangent_vect = np.array([
                arr_ctl_der[0][iz - min_z_index] * px,
                arr_ctl_der[1][iz - min_z_index] * py, pz
            ])
            # Normalize vector by its L2 norm
            tangent_vect = tangent_vect / np.linalg.norm(tangent_vect)
            # Compute the angle about AP axis between the centerline and the normal vector to the slice
            v0 = [tangent_vect[0], tangent_vect[2]]
            v1 = [0, 1]
            angle_AP_rad = np.math.atan2(np.linalg.det([v0, v1]),
                                         np.dot(v0, v1))
            # Compute the angle about RL axis between the centerline and the normal vector to the slice
            v0 = [tangent_vect[1], tangent_vect[2]]
            v1 = [0, 1]
            angle_RL_rad = np.math.atan2(np.linalg.det([v0, v1]),
                                         np.dot(v0, v1))
            # Apply affine transformation to account for the angle between the centerline and the normal to the patch
            tform = transform.AffineTransform(scale=(np.cos(angle_RL_rad),
                                                     np.cos(angle_AP_rad)))
            # Convert to float64, to avoid problems in image indexation causing issues when applying transform.warp
            current_patch = current_patch.astype(np.float64)
            # TODO: make sure pattern does not go extend outside of image border
            current_patch_scaled = transform.warp(
                current_patch,
                tform.inverse,
                output_shape=current_patch.shape,
                order=1,
            )
        else:
            current_patch_scaled = current_patch
            angle_AP_rad, angle_RL_rad = 0.0, 0.0
        # compute shape properties on 2D patch
        shape_property = _properties2d(current_patch_scaled, [px, py])
        if shape_property is not None:
            # Add custom fields
            shape_property['angle_AP'] = angle_AP_rad * 180.0 / math.pi
            shape_property['angle_RL'] = angle_RL_rad * 180.0 / math.pi
            # Loop across properties and assign values for function output
            for property_name in property_list:
                shape_properties[property_name][iz] = shape_property[
                    property_name]
        else:
            logging.warning('\nNo properties for slice: {}'.format(iz))
        """ DEBUG
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        from matplotlib.figure import Figure
        fig = Figure()
        FigureCanvas(fig)
        ax = fig.add_subplot(111)
        ax.imshow(current_patch_scaled)
        ax.grid()
        ax.set_xlabel('y')
        ax.set_ylabel('x')
        fig.savefig('tmp_fig.png')
        """
    metrics = {}
    for key, value in shape_properties.items():
        # Making sure all entries added to metrics have results
        if not value == []:
            metrics[key] = Metric(data=np.array(value), label=key)

    return metrics, fit_results
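A note on the angle correction above: because each patch is rescaled by (cos(angle_RL), cos(angle_AP)) before _properties2d is applied, the reported in-plane measures are effectively corrected for the obliquity of the centerline, e.g. for the area

    area_corrected = area_axial_slice * cos(angle_RL) * cos(angle_AP)

(this simply restates what the AffineTransform scaling in the loop achieves; it is not an additional formula from the source).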
Ejemplo n.º 48
0
import numpy as np
import pandas as pd
import xarray as xr  # needed for xr.open_dataset / xr.DataArray below

# set the date range of output files
st = '20190724 18:00'
et = '20190724 23:55'
dt = 5  # unit: min

# get basic file
datadir = './lda/'
base = 'wrflda_d03_2019-07-25_00:00:00'
ds = xr.open_dataset(datadir + base)

# load data and create zero DataArray
da = ds['LDACHECK']
ds['LDACHECK'] = da.copy(data=np.full_like(da, 0.))

# generate output filenames based on the date range
dr = pd.date_range(st, et, freq=str(dt) + 'T')
filenames = [d.strftime(f'{base[:10]}_%Y-%m-%d_%H:%M:%S') for d in dr]

# create files (zero values) based on filenames
for tindex, f in enumerate(filenames):
    # generate 'Times' variable
    Times = xr.DataArray(np.array([dr[tindex]], dtype=np.dtype(('S', 19))),
                         dims=['Time'])
    ds['Times'] = Times

    ncfile = datadir + f
    print('Saving to {}'.format(ncfile))
    ds.to_netcdf(ncfile,
Ejemplo n.º 49
0
# Create an array of the given shape with every element set to value
full = np.full((2, 3), 2)
print('full-->', full)
# Create a square n*n identity matrix: 1 on the diagonal, 0 elsewhere
eye = np.eye(4)
print('eye--->', eye)

# Create an all-ones array with the same shape as the given array
one_like = np.ones_like(full)
print('one_like--->', one_like)

# Create an all-zeros array with the same shape as the given array
zero_like = np.zeros_like(full)
print('zero_like--->', zero_like)

# Create an array filled with value, with the same shape as the given array
full_like = np.full_like(full, 3)
print('full_like--->', full_like)

linespace1 = np.linspace(2.0, 3.0, num=5)
print('linespace1----->', linespace1)

linespace2 = np.linspace(1, 10, 4)
print('linespace2----->', linespace2)

linespace3 = np.linspace(1, 10, 4, endpoint=False)
print('linespace3----->', linespace3)

concatenate = np.concatenate((x, y))
print('concatenate----->', concatenate)
Ejemplo n.º 50
0
plt.ylabel('Frequency')
plt.title('Histogram of village foundations in different periods')

# 4.1.2 Density
# KDE
x = np.array(df_vil_wgs84['AD'])
x_d = np.linspace(1200, 1400, 201)

kde = KernelDensity(bandwidth=5, kernel='gaussian')
kde.fit(x[:, None])

# score_samples returns the log of the probability density
logprob = kde.score_samples(x_d[:, None])

plt.fill_between(x_d, np.exp(logprob), alpha=0.5)
plt.plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)
plt.ylim(0, 0.025)
plt.xlim(1220, 1380)
plt.xlabel('Time A.D.')
plt.ylabel('Density of village foundation')

# 4.1.3 Distance between Events Concept

plt.plot(df_vil_wgs84["AD"], df_vil_wgs84["id"], color='gray')
plt.scatter(df_vil_wgs84["AD"], df_vil_wgs84["id"], color='darkgray')
plt.grid(axis='y', alpha=0.75)
plt.grid(axis='x', alpha=0.75)
plt.xlabel('Time A.D.')
plt.ylabel('id')

# Interval
Ejemplo n.º 51
0
def compute_cj_estimate(posterior_sample,
                        lnlikefunc,
                        lnpriorfunc,
                        param_post,
                        nsamples,
                        qprob=None,
                        lnlikeargs=(),
                        lnpriorargs=(),
                        lnlike_post=None,
                        lnprior_post=None):
    """
    Computes the Chib & Jeliazkov estimate of the bayesian evidence.
    The estimation is based on a posterior sample with n elements
    (indexed by s, with s = 0, ..., n-1), and a sample from the proposal
    distribution used in MCMC (qprob) of size nsample. Note that if qprob is
    None, it is estimated as a multivariate Gaussian.
    :param array posterior_sample:
        A sample from the parameter posterior distribution. Dimensions are
        (n x k), where k is the number of parameters.
    :param callable lnlikefunc:
        Function to compute ln(likelihood) on the marginal samples.
    :param callable lnpriorfunc:
        Function to compute ln(prior density) on the marginal samples.
    :param array param_post:
        Posterior parameter sample used to obtained fixed point needed by the
        algorithm.
    :param int nsamples:
        Size of sample drawn from proposal distribution.
    :param object or None qprob:
        Proposal distribution function. If None, it will be estimated as a
        multivariate Gaussian. If not None, it must possess the methods pdf and
        rvs. See scipy.stats.rv_continuous.
    :param tuple lnlikeargs:
        Extra arguments passed to the likelihood function.
    :param tuple lnpriorargs:
        Extra arguments passed to the lnprior function.
    :param array lnlike_post:
        log(likelihood) computed over a posterior sample. 1-D array of length n.
    :param array lnprior_post:
        log(prior) computed over a posterior sample. 1-D array of length n.
    :raises AttributeError:
        if instance qprob does not have method 'pdf' or 'rvs'.
    :raises TypeError:
        if methods 'pdf' or 'rvs' from instance qprob are not callable.
    :returns: Natural logarithm of estimated Bayesian evidence.
    References
    ----------
    Chib & Jeliazkov (2001): Journal of the Am. Stat. Assoc.; Mar 2001; 96, 453
    """
    #Find fixed point on which to estimate posterior ordinate.
    if lnlike_post is not None:
        #Pass values of log(likelihood) in posterior sample.
        arg_fp = [
            lnlike_post,
        ]
    else:
        #Pass function that computes log(likelihood).
        arg_fp = [
            lnlikefunc,
        ]
    if lnlike_post is not None:
        #Pass values of log(prior) in posterior sample.
        arg_fp.append(lnprior_post)
    else:
        #Pass function that computes log(prior).
        arg_fp.append(lnpriorfunc)
    fp, lnpost0 = get_fixed_point(posterior_sample,
                                  param_post,
                                  lnlikefunc,
                                  lnpriorfunc,
                                  lnlikeargs=lnlikeargs,
                                  lnpriorargs=lnpriorargs)
    #If proposal distribution is not given, define as multivariate Gaussian.
    if qprob is None:
        #Get covariance from posterior sample
        k = np.cov(posterior_sample.T)
        qprob = lib.MultivariateGaussian(fp, k)
    else:
        #Check that qprob has the necessary attributes
        for method in ('pdf', 'rvs'):
            try:
                att = getattr(qprob, method)
            except AttributeError:
                raise AttributeError('qprob does not have method '
                                     '\'{}\''.format(method))
            if not callable(att):
                raise TypeError('{} method of qprob is not '
                                'callable'.format(method))
    #Compute proposal density in posterior sample
    q_post = qprob.pdf(posterior_sample)
    #If likelihood over posterior sample is not given, compute it
    if lnlike_post is None:
        lnlike_post = lnlikefunc(posterior_sample, *lnlikeargs)
    #Idem for prior
    if lnprior_post is None:
        lnprior_post = lnpriorfunc(posterior_sample, *lnpriorargs)
    #Compute Metropolis ratio with respect to fixed point over posterior sample
    lnalpha_post = metropolis_ratio(lnprior_post + lnlike_post, lnpost0)
    #Sample from the proposal distribution with respect to fixed point
    proposal_sample = qprob.rvs(nsamples)
    #Compute likelihood and prior on proposal_sample
    lnprior_prop = lnpriorfunc(proposal_sample, *lnpriorargs)
    if np.all(lnprior_prop == -np.inf):
        raise ValueError('All samples from proposal density have zero prior '
                         'probability. Increase nsample.')
    #Now compute likelihood only on the samples where prior != 0.
    lnlike_prop = np.full_like(lnprior_prop, -np.inf)
    ind = lnprior_prop != -np.inf
    lnlike_prop[ind] = lnlikefunc(proposal_sample[ind, :], *lnlikeargs)
    #Get Metropolis ratio with respect to fixed point over proposal sample
    lnalpha_prop = metropolis_ratio(lnpost0, lnprior_prop + lnlike_prop)
    #Compute estimate of posterior ordinate (see Eq. 9 from reference)
    num = log_sum(lnalpha_post + q_post) - log(len(posterior_sample))
    den = log_sum(lnalpha_prop) - log(len(proposal_sample))
    lnpostord = num - den
    #Return log(Evidence) estimation
    return lnpost0 - lnpostord
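For reference, the value returned above follows the Chib & Jeliazkov posterior-ordinate identity (generic notation, given here as a reading aid rather than a restatement of the source):

    \ln \hat{m}(y) = \ln p(y \mid \theta^*) + \ln p(\theta^*) - \ln \hat{\pi}(\theta^* \mid y),
    \qquad
    \hat{\pi}(\theta^* \mid y) =
        \frac{M^{-1}\sum_{s} \alpha(\theta^{(s)}, \theta^*)\, q(\theta^{(s)}, \theta^*)}
             {J^{-1}\sum_{j} \alpha(\theta^*, \theta^{(j)})}

where \alpha is the Metropolis ratio, the numerator averages over the M posterior samples and the denominator over the J proposal draws; lnpost0 plays the role of the first two terms, while num - den approximates \ln \hat{\pi}(\theta^* \mid y).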
Ejemplo n.º 52
0
def _set_animation(
    pp_sampled_vals,
    ax,
    dtype=None,
    kind="density",
    alpha=None,
    drawstyle=None,
    linewidth=None,
    height=None,
    markersize=None,
    plot_kwargs=None,
):
    if kind == "kde":
        length = len(pp_sampled_vals)
        if dtype == "f":
            y_vals, lower, upper = _fast_kde(pp_sampled_vals[0])
            x_vals = np.linspace(lower, upper, len(y_vals))

            max_max = max(
                [max(_fast_kde(pp_sampled_vals[i])[0]) for i in range(length)])

            ax.set_ylim(0, max_max)

            (line, ) = ax.plot(x_vals, y_vals, **plot_kwargs)

            def animate(i):
                y_vals, lower, upper = _fast_kde(pp_sampled_vals[i])
                x_vals = np.linspace(lower, upper, len(y_vals))
                line.set_data(x_vals, y_vals)
                return line

        else:
            vals = pp_sampled_vals[0]
            y_vals, x_vals = histogram(vals, bins="auto")
            (line, ) = ax.plot(x_vals[:-1], y_vals, **plot_kwargs)

            max_max = max([
                max(histogram(pp_sampled_vals[i], bins="auto")[0])
                for i in range(length)
            ])

            ax.set_ylim(0, max_max)

            def animate(i):
                y_vals, x_vals = histogram(pp_sampled_vals[i], bins="auto")
                line.set_data(x_vals[:-1], y_vals)
                return (line, )

    elif kind == "cumulative":
        x_vals, y_vals = _empirical_cdf(pp_sampled_vals[0])
        (line, ) = ax.plot(x_vals,
                           y_vals,
                           alpha=alpha,
                           color="C5",
                           drawstyle=drawstyle,
                           linewidth=linewidth)

        def animate(i):
            x_vals, y_vals = _empirical_cdf(pp_sampled_vals[i])
            line.set_data(x_vals, y_vals)
            return line

    elif kind == "scatter":
        x_vals = pp_sampled_vals[0]
        y_vals = np.full_like(x_vals, height, dtype=np.float64)
        (line, ) = ax.plot(x_vals,
                           y_vals,
                           "o",
                           zorder=2,
                           color="C5",
                           markersize=markersize,
                           alpha=alpha)

        def animate(i):
            line.set_xdata(np.ravel(pp_sampled_vals[i]))
            return line

    def init():
        if kind != "scatter":
            line.set_data([], [])
        else:
            line.set_xdata([])
        return line

    return animate, init
Ejemplo n.º 53
0
def make_summary(
    input_text, compression, number_of_sentence
):  # input_text: input text, compression: compression ratio (%), number_of_sentence: character limit
    input_text = input_text.strip("\n")
    input_text = input_text.split('。')
    #print(input_text)
    input_text_copy = input_text
    # Morphological analysis (requires: pip3 install janome)

    tokenslist = []
    tokenslist_dic = []
    #print(tokenslist)
    for i in range(len(input_text) - 1):
        tokenslist1 = []
        t = Tokenizer()  # create the tokenizer
        tokens = t.tokenize(input_text[i])  # morphological analysis; each token is stored in tokens[]
        for token in tokens:
            #print(token)  # display the result
            tokenslist1.append(token.surface)
            tokenslist_dic.append(token.surface)
        tokenslist.append(tokenslist1)

    #print(tokens[0].surface) # display the result (.surface gives the token text)
    #print(tokenslist)

    # Build the vocabulary
    #print(tokenslist_dic)
    tokenslist_dic = list(set(tokenslist_dic))  # remove duplicates
    #print(tokenslist_dic)
    #print(len(tokenslist_dic))
    vec = np.zeros((len(tokenslist), len(tokenslist_dic)))  # allocate the array
    #print(vec.shape)

    # Build the bag of words
    for i in range(len(tokenslist)):
        for j in range(len(tokenslist_dic)):
            #print(tokenslist[i].count(tokenslist_dic[j]))
            vec[i][j] = tokenslist[i].count(tokenslist_dic[j])

    #print(vec)

    #print(cosinSimilarity(vec[0],vec[0]))

    graph = np.zeros((len(tokenslist), len(tokenslist)), dtype="float32")

    for i in range(len(tokenslist)):
        for j in range(len(tokenslist)):
            graph[i][j] = cosinSimilarity(vec[i], vec[j])

    #print(graph)  # matrix of pairwise cosine similarities

    # Build the adjacency matrix
    para = 0.3  # comparison threshold
    compare = np.full_like(graph, para)  # array used for the comparison

    adjacency = graph > compare
    #print(adjacency)

    # Build the transition (probability) matrix
    rundom_graph = np.zeros_like(graph)

    for i in range(len(tokenslist)):
        sum_one = np.sum(adjacency[i])
        for j in range(len(tokenslist)):
            rundom_graph[i][j] = adjacency[i][j] / sum_one

    #print(rundom_graph)

    ratings = power_method(rundom_graph, len(tokenslist), 0.01)

    #print((ratings))  # shows how important each sentence is

    # Build the output text

    compression = compression / 100  # compression ratio (%)
    #number_of_sentence = len(tokenslist)  # number of characters

    sort_index = np.argsort(ratings)  # indices that would sort the ratings
    #print(sort_index)
    output = []
    output_index = []
    for i in range(int(len(tokenslist) / 2)):
        output_index.append(sort_index[i])

    output_index = np.array(output_index)
    output_index = np.sort(output_index)  # sort the sentence indices
    #print(output_index)

    for i in range(len(output_index)):
        #print(input_text_copy[output_index[i]])
        output.append(input_text_copy[output_index[i]])

    # Convert the list back into a single text
    output_text = "。".join(line for line in output if line)
    return output_text
Ejemplo n.º 54
0
def simulation(pts, net_layout, cost_dict, netsim_dict):
    '''
    Generates a network of paths.
    
    Parameters
    ----------
    
    pts: dataframe
        contains the identifier, row and column of each location
    
    net_layout: dataframe
        dataframe specifying origin and destination of each path in the network
    
    cost_dict: dictionary
        contains parameters used for ``calculate_iwdt()``
    
    netsim_dict: dictionary
        contains parameters needed to execute network simulation


    Returns
    -------
    
    Gt: 2D numpy array
        final ground potential
    
    paths: 2D numpy array
        sum of all network paths
        
    paths_dict: dictionary
        dictionary with the track of every path in network


    Notes
    -----
    
    ``simulation()`` generates a network of paths using the ``net_layout`` dataframe generated with the
    functions from the **generate** module.

    The ``netsim_dict{}`` must contain the following entries:

    - 'i: ' - float
      effect of new path. *Default:* 1.0
    - 'Gmax: ' - float
      maximum ground potential.
    - 'T: ' - float
      1/T represents the *residuality* of a path.
    - 'alpha: ' - float
      coefficient calculated from :math:`\\alpha_1 = \\frac {d_0} {ln(1 - NC_0)}`
    

    While the ``cost_dict{}`` must contain the same entries as ``iwdt_dict{}`` see **cost** module

        
    '''

    # unpack netsim dictionary
    i = netsim_dict['i']
    Gmax = netsim_dict['Gmax']
    T = netsim_dict['T']
    alpha = netsim_dict['alpha']

    # initialize netsim outputs
    Gt = np.zeros_like(cost_dict['dem'])
    Gt_1 = np.zeros_like(Gt)
    paths = np.zeros_like(Gt)

    # initialize paths dictionary
    path_lst = []  #paths_dict= {}#OrderedDict()

    for pth_id, pth_def in net_layout.iterrows():

        # origin & destination?
        o = pth_def['origin']
        d = pth_def['destination']

        # retrieve location @ origin & destination
        origin = [[
            pts.loc[pts['id'] == o, 'r'].values[0], pts.loc[pts['id'] == o,
                                                            'c'].values[0]
        ]]
        destination = [[
            pts.loc[pts['id'] == d, 'r'].values[0], pts.loc[pts['id'] == d,
                                                            'c'].values[0]
        ]]

        # initialize ACS
        acs = np.full_like(Gt, 999999.0)
        for r, c in origin:
            acs[r, c] = 0.0

        # calculated influence weighted distance transform
        acs, blx, bly = calculate_iwdt(acs, cost_dict)

        # create new path to destination
        path_t, path_info = pt.create_paths(blx,
                                            bly,
                                            origin,
                                            destination,
                                            start_path=pth_id)

        # update paths
        paths += path_t
        path_lst.append(path_info)  #paths_dict.update(path_info)

        # update ground potential
        Gt = Gt_1 - (Gt_1 / T) + path_t * i * (1 - (Gt_1 / Gmax))

        # update network cost
        d = np.full_like(Gt, 99999.0)
        d[Gt >= 1.0] = 0.0
        d = calculate_dt(d, cost_dict['cellsize'], option=2)
        cost_dict['netcost'] = 1.0 - np.exp(d / alpha)

        # update Gt_1
        Gt_1 = np.copy(Gt)

    return Gt, paths, path_lst  #paths_dict
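In the loop above, the ground potential is updated per path as

    G_t = G_{t-1} - G_{t-1}/T + i * P_t * (1 - G_{t-1}/Gmax)

(decay at rate 1/T plus a reinforcement term that saturates at Gmax, with P_t standing for path_t), and the network cost is NC(d) = 1 - exp(d/alpha); with alpha = d0/ln(1 - NC0) as in the docstring, a cell at distance d0 from an existing path gets NC = NC0. This is a restatement of the update rule already in the code, not an extra step.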
Ejemplo n.º 55
0
    def from_shuffled(cls, shuffled_results: ShuffledResults):
        return _FinalResults(
            theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
            skeletons=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
            scores=np.full_like(shuffled_results.scores[:, 0], np.nan),
        )
Ejemplo n.º 56
0
def test_bounds(simulated_problem, method):
    """Test that non-binding bounds on parameters in simulated problems do not affect estimates and that binding bounds
    are respected.
    """
    simulation, _, problem, _ = simulated_problem

    # all problems will be solved with the same optimization method starting as close to the true parameters as possible
    solve = lambda s, p: problem.solve(
        np.minimum(np.maximum(simulation.sigma, s[0]), s[1]),
        np.minimum(np.maximum(simulation.pi, p[0]), p[1])
        if simulation.pi is not None else None,
        sigma_bounds=s,
        pi_bounds=p,
        steps=1,
        optimization=Optimization(method))

    # solve the problem when unbounded
    unbounded_sigma_bounds = (np.full_like(simulation.sigma, -np.inf),
                              np.full_like(simulation.sigma, +np.inf))
    unbounded_pi_bounds = None
    if simulation.pi is not None:
        unbounded_pi_bounds = (np.full_like(simulation.pi, -np.inf),
                               np.full_like(simulation.pi, +np.inf))
    unbounded_results = solve(unbounded_sigma_bounds, unbounded_pi_bounds)

    # choose an element in each parameter matrix and identify its estimated value
    sigma_index = (simulation.sigma.nonzero()[0][0],
                   simulation.sigma.nonzero()[1][0])
    sigma_value = unbounded_results.sigma[sigma_index]
    pi_index = None
    if simulation.pi is not None:
        pi_index = (simulation.pi.nonzero()[0][0],
                    simulation.pi.nonzero()[1][0])
        pi_value = unbounded_results.pi[pi_index]

    # use different types of binding bounds and skip types that fix all parameters
    for lb_scale, ub_scale in [(+np.inf, -0.1), (-0.1, +np.inf), (+1, -0.1),
                               (-0.1, +1), (0, 0)]:
        binding_sigma_bounds = (np.full_like(simulation.sigma, -np.inf),
                                np.full_like(simulation.sigma, +np.inf))
        binding_sigma_bounds[0][
            sigma_index] = sigma_value - lb_scale * np.abs(sigma_value)
        binding_sigma_bounds[1][
            sigma_index] = sigma_value + ub_scale * np.abs(sigma_value)
        if simulation.pi is None:
            binding_pi_bounds = None
            if np.array_equal(*map(np.abs, binding_sigma_bounds)):
                continue
        else:
            binding_pi_bounds = (np.full_like(simulation.pi, -np.inf),
                                 np.full_like(simulation.pi, +np.inf))
            binding_pi_bounds[0][
                pi_index] = pi_value - lb_scale * np.abs(pi_value)
            binding_pi_bounds[1][
                pi_index] = pi_value + ub_scale * np.abs(pi_value)
            if np.array_equal(
                    *map(np.abs, binding_sigma_bounds)) and np.array_equal(
                        *map(np.abs, binding_pi_bounds)):
                continue

        # solve the problem with binding bounds and test that they are essentially respected
        binding_results = solve(binding_sigma_bounds, binding_pi_bounds)
        assert_array_less = lambda a, b: np.testing.assert_array_less(
            a, b + 1e-14, verbose=True)
        assert_array_less(binding_sigma_bounds[0], binding_results.sigma)
        assert_array_less(binding_results.sigma, binding_sigma_bounds[1])
        if simulation.pi is not None:
            assert_array_less(binding_pi_bounds[0], binding_results.pi)
            assert_array_less(binding_results.pi, binding_pi_bounds[1])

    # for methods other than TNC, which works differently with bounds, test that non-binding bounds furnish results that
    #   are similar to their unbounded counterparts
    if method != 'tnc':
        unbinding_sigma_bounds = (simulation.sigma -
                                  1e10 * np.abs(simulation.sigma),
                                  simulation.sigma +
                                  1e10 * np.abs(simulation.sigma))
        unbinding_pi_bounds = None
        if simulation.pi is not None:
            unbinding_pi_bounds = (simulation.pi -
                                   1e10 * np.abs(simulation.pi),
                                   simulation.pi +
                                   1e10 * np.abs(simulation.pi))
        unbinding_results = solve(unbinding_sigma_bounds, unbinding_pi_bounds)
        np.testing.assert_allclose(unbounded_results.sigma,
                                   unbinding_results.sigma,
                                   atol=0,
                                   rtol=0.1)
        if simulation.pi is not None:
            np.testing.assert_allclose(unbounded_results.pi,
                                       unbinding_results.pi,
                                       atol=0,
                                       rtol=0.1)
Ejemplo n.º 57
0
import numpy as np

#All 0s matrix
a = np.zeros([5, 5])
#print(a)

#All 1s matrix
b = np.ones([4, 4], dtype='int32')
#print(b)

#any other number

#print(np.full((2,2) , 99))

c = np.full_like(a, 4)
#print(c)
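# np.full_like inherits the dtype of the template array unless dtype is given
# explicitly; a short illustration using a and b from above (these extra lines
# are not part of the original snippet)
d = np.full_like(a, 4.5)                    # a is float64, so the values stay 4.5
e = np.full_like(b, 4.5)                    # b is int32, so 4.5 is truncated to 4
f = np.full_like(b, 4.5, dtype=np.float64)  # explicit dtype keeps 4.5
#print(d, e, f)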

#random decimal numbers
random = np.random.rand(4, 2)
print(random)

#random integer values

# lower , upper , size
random = np.random.randint(0, 10, size=(3, 3))
print(random)

identity = np.identity(5)
print(identity)

#repeat an array
Ejemplo n.º 58
0
def plot_ppc(
    data,
    kind="kde",
    alpha=None,
    mean=True,
    figsize=None,
    textsize=None,
    data_pairs=None,
    var_names=None,
    coords=None,
    flatten=None,
    flatten_pp=None,
    num_pp_samples=None,
    random_seed=None,
    jitter=None,
    animated=False,
    animation_kwargs=None,
    legend=True,
    ax=None,
):
    """
    Plot for posterior predictive checks.

    Parameters
    ----------
    data : az.InferenceData object
        InferenceData object containing the observed and posterior
        predictive data.
    kind : str
        Type of plot to display (kde, cumulative, or scatter). Defaults to kde.
    alpha : float
        Opacity of posterior predictive density curves. Defaults to 0.2 for kind = kde
        and cumulative, for scatter defaults to 0.7
    mean : bool
        Whether or not to plot the mean posterior predictive distribution. Defaults to True
    figsize : tuple
        Figure size. If None it will be defined automatically.
    textsize: float
        Text size scaling factor for labels, titles and lines. If None it will be
        autoscaled based on figsize.
    data_pairs : dict
        Dictionary containing relations between observed data and posterior predictive data.
        Dictionary structure:
        Key = data var_name
        Value = posterior predictive var_name
        For example, `data_pairs = {'y' : 'y_hat'}`
        If None, it will assume that the observed data and the posterior
        predictive data have the same variable name.
    var_names : list
        List of variables to be plotted. Defaults to all observed variables in the
        model if None.
    coords : dict
        Dictionary mapping dimensions to selected coordinates to be plotted.
        Dimensions without a mapping specified will include all coordinates for
        that dimension. Defaults to including all coordinates for all
        dimensions if None.
    flatten : list
        List of dimensions to flatten in observed_data. Only flattens across the coordinates
        specified in the coords argument. Defaults to flattening all of the dimensions.
    flatten_pp : list
        List of dimensions to flatten in posterior_predictive. Only flattens across the coordinates
        specified in the coords argument. Defaults to flattening all of the dimensions.
        Dimensions should match flatten excluding dimensions for data_pairs parameters.
        If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
    num_pp_samples : int
        The number of posterior predictive samples to plot. For `kind` = 'scatter' and
        `animation = False` it defaults to a maximum of 5 samples and will set jitter to 0.7
        unless defined otherwise. Otherwise it defaults to all provided samples.
    random_seed : int
        Random number generator seed passed to numpy.random.seed to allow
        reproducibility of the plot. By default, no seed will be provided
        and the plot will change each call if a random sample is specified
        by `num_pp_samples`.
    jitter : float
        If kind is "scatter", jitter will add random uniform noise to the height
        of the ppc samples and observed data. By default 0.
    animated : bool
        Create an animation of one posterior predictive sample per frame. Defaults to False.
    animation_kwargs : dict
        Keywords passed to `animation.FuncAnimation`.
    legend : bool
        Add legend to figure. By default True.
    ax : axes
        Matplotlib axes. Defaults to None.

    Returns
    -------
    axes : matplotlib axes

    Examples
    --------
    Plot the observed data KDE overlaid on posterior predictive KDEs.

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> data = az.load_arviz_data('radon')
        >>> az.plot_ppc(data)

    Plot the overlay with empirical CDFs.

    .. plot::
        :context: close-figs

        >>> az.plot_ppc(data, kind='cumulative')

    Use the coords and flatten parameters to plot selected variable dimensions
    across multiple plots.

    .. plot::
        :context: close-figs

        >>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])

    Plot the overlay using a stacked scatter plot that is particularly useful
    when the sample sizes are small.

    .. plot::
        :context: close-figs

        >>> az.plot_ppc(data, kind='scatter', flatten=[],
        >>>             coords={'observed_county': ['AITKIN', 'BELTRAMI']})

    Plot random posterior predictive sub-samples.

    .. plot::
        :context: close-figs

        >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
    """
    for group in ("posterior_predictive", "observed_data"):
        if not hasattr(data, group):
            raise TypeError(
                '`data` argument must have the group "{group}" for ppcplot'.
                format(group=group))

    if kind.lower() not in ("kde", "cumulative", "scatter"):
        raise TypeError(
            "`kind` argument must be either `kde`, `cumulative`, or `scatter`")

    if data_pairs is None:
        data_pairs = {}

    if animation_kwargs is None:
        animation_kwargs = {}
    if platform.system() == "Linux":
        animation_kwargs.setdefault("blit", True)
    else:
        animation_kwargs.setdefault("blit", False)

    if animated and animation_kwargs["blit"] and platform.system() != "Linux":
        _log.warning(
            "If you experience problems rendering the animation try setting"
            "`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)"
        )

    if alpha is None:
        if animated:
            alpha = 1
        else:
            if kind.lower() == "scatter":
                alpha = 0.7
            else:
                alpha = 0.2

    if jitter is None:
        jitter = 0.0
    assert jitter >= 0.0

    observed = data.observed_data
    posterior_predictive = data.posterior_predictive

    if var_names is None:
        var_names = list(observed.data_vars)
    var_names = _var_names(var_names, observed)
    pp_var_names = [data_pairs.get(var, var) for var in var_names]
    pp_var_names = _var_names(pp_var_names, posterior_predictive)

    if flatten_pp is None and flatten is None:
        flatten_pp = list(posterior_predictive.dims.keys())
    elif flatten_pp is None:
        flatten_pp = flatten
    if flatten is None:
        flatten = list(observed.dims.keys())

    if coords is None:
        coords = {}

    if random_seed is not None:
        np.random.seed(random_seed)

    total_pp_samples = posterior_predictive.sizes[
        "chain"] * posterior_predictive.sizes["draw"]
    if num_pp_samples is None:
        if kind == "scatter" and not animated:
            num_pp_samples = min(5, total_pp_samples)
        else:
            num_pp_samples = total_pp_samples

    if (not isinstance(num_pp_samples, Integral) or num_pp_samples < 1
            or num_pp_samples > total_pp_samples):
        raise TypeError("`num_pp_samples` must be an integer between 1 and " +
                        "{limit}.".format(limit=total_pp_samples))

    pp_sample_ix = np.random.choice(total_pp_samples,
                                    size=num_pp_samples,
                                    replace=False)

    for key in coords.keys():
        coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]

    obs_plotters = filter_plotters_list(
        list(
            xarray_var_iter(observed.isel(coords),
                            skip_dims=set(flatten),
                            var_names=var_names,
                            combined=True)),
        "plot_ppc",
    )
    length_plotters = len(obs_plotters)
    pp_plotters = [
        tup for _, tup in zip(
            range(length_plotters),
            xarray_var_iter(
                posterior_predictive.isel(coords),
                var_names=pp_var_names,
                skip_dims=set(flatten_pp),
                combined=True,
            ),
        )
    ]
    rows, cols = default_grid(length_plotters)

    (figsize, ax_labelsize, _, xt_labelsize, linewidth,
     markersize) = _scale_fig_size(figsize, textsize, rows, cols)

    if ax is None:
        fig, axes = _create_axes_grid(length_plotters,
                                      rows,
                                      cols,
                                      figsize=figsize)
    else:
        axes = np.ravel(ax)
        if len(axes) != length_plotters:
            raise ValueError(
                "Found {} variables to plot but {} axes instances. They must be equal."
                .format(length_plotters, len(axes)))
        if animated:
            fig = axes[0].get_figure()
            if not all([ax.get_figure() is fig for ax in axes]):
                raise ValueError(
                    "All axes must be on the same figure for animation to work"
                )

    for i, ax_i in enumerate(axes):
        var_name, selection, obs_vals = obs_plotters[i]
        pp_var_name, _, pp_vals = pp_plotters[i]
        dtype = posterior_predictive[pp_var_name].dtype.kind

        # flatten non-specified dimensions
        obs_vals = obs_vals.flatten()
        pp_vals = pp_vals.reshape(total_pp_samples, -1)
        pp_sampled_vals = pp_vals[pp_sample_ix]

        if kind == "kde":
            plot_kwargs = {
                "color": "C5",
                "alpha": alpha,
                "linewidth": 0.5 * linewidth
            }
            if dtype == "i":
                plot_kwargs["drawstyle"] = "steps-pre"
            ax_i.plot([],
                      color="C5",
                      label="Posterior predictive {}".format(pp_var_name))

            if dtype == "f":
                plot_kde(
                    obs_vals,
                    label="Observed {}".format(var_name),
                    plot_kwargs={
                        "color": "k",
                        "linewidth": linewidth,
                        "zorder": 3
                    },
                    fill_kwargs={"alpha": 0},
                    ax=ax_i,
                    legend=legend,
                )
            else:
                bins = get_bins(obs_vals)
                hist, bin_edges = histogram(obs_vals, bins=bins)
                hist = np.concatenate((hist[:1], hist))
                ax_i.plot(
                    bin_edges,
                    hist,
                    label="Observed {}".format(var_name),
                    color="k",
                    linewidth=linewidth,
                    zorder=3,
                    drawstyle=plot_kwargs["drawstyle"],
                )

            # compute a KDE (continuous data) or histogram (discrete data) for each selected draw
            pp_densities = []
            pp_xs = []
            for vals in pp_sampled_vals:
                vals = np.array([vals]).flatten()
                if dtype == "f":
                    pp_density, lower, upper = _fast_kde(vals)
                    pp_x = np.linspace(lower, upper, len(pp_density))
                    pp_densities.append(pp_density)
                    pp_xs.append(pp_x)
                else:
                    bins = get_bins(vals)
                    hist, bin_edges = histogram(vals, bins=bins)
                    hist = np.concatenate((hist[:1], hist))
                    pp_densities.append(hist)
                    pp_xs.append(bin_edges)

            if animated:
                animate, init = _set_animation(pp_sampled_vals,
                                               ax_i,
                                               dtype=dtype,
                                               kind=kind,
                                               plot_kwargs=plot_kwargs)

            else:
                if dtype == "f":
                    ax_i.plot(np.transpose(pp_xs), np.transpose(pp_densities),
                              **plot_kwargs)
                else:
                    for x_s, y_s in zip(pp_xs, pp_densities):
                        ax_i.plot(x_s, y_s, **plot_kwargs)

            if mean:
                if dtype == "f":
                    rep = len(pp_densities)
                    len_density = len(pp_densities[0])

                    new_x = np.linspace(np.min(pp_xs), np.max(pp_xs),
                                        len_density)
                    new_d = np.zeros((rep, len_density))
                    bins = np.digitize(pp_xs, new_x, right=True)
                    new_x -= (new_x[1] - new_x[0]) / 2
                    for irep in range(rep):
                        new_d[irep][bins[irep]] = pp_densities[irep]
                    ax_i.plot(
                        new_x,
                        new_d.mean(0),
                        color="C0",
                        linestyle="--",
                        linewidth=linewidth,
                        zorder=2,
                        label="Posterior predictive mean {}".format(
                            pp_var_name),
                    )
                else:
                    vals = pp_vals.flatten()
                    bins = get_bins(vals)
                    hist, bin_edges = histogram(vals, bins=bins)
                    hist = np.concatenate((hist[:1], hist))
                    ax_i.plot(
                        bin_edges,
                        hist,
                        color="C0",
                        linewidth=linewidth,
                        label="Posterior predictive mean {}".format(
                            pp_var_name),
                        zorder=2,
                        linestyle="--",
                        drawstyle=plot_kwargs["drawstyle"],
                    )
            ax_i.tick_params(labelsize=xt_labelsize)
            ax_i.set_yticks([])

        elif kind == "cumulative":
            drawstyle = "default" if dtype == "f" else "steps-pre"
            ax_i.plot(*_empirical_cdf(obs_vals),
                      color="k",
                      linewidth=linewidth,
                      label="Observed {}".format(var_name),
                      drawstyle=drawstyle,
                      zorder=3)
            if animated:
                animate, init = _set_animation(
                    pp_sampled_vals,
                    ax_i,
                    kind=kind,
                    alpha=alpha,
                    drawstyle=drawstyle,
                    linewidth=linewidth,
                )

            else:
                pp_densities = np.empty(
                    (2 * len(pp_sampled_vals), pp_sampled_vals[0].size))
                for idx, vals in enumerate(pp_sampled_vals):
                    vals = np.array([vals]).flatten()
                    pp_x, pp_density = _empirical_cdf(vals)
                    pp_densities[2 * idx] = pp_x
                    pp_densities[2 * idx + 1] = pp_density

                ax_i.plot(*pp_densities,
                          alpha=alpha,
                          color="C5",
                          drawstyle=drawstyle,
                          linewidth=linewidth)
            ax_i.plot([],
                      color="C5",
                      label="Posterior predictive {}".format(pp_var_name))
            if mean:
                ax_i.plot(
                    *_empirical_cdf(pp_vals.flatten()),
                    color="C0",
                    linestyle="--",
                    linewidth=linewidth,
                    drawstyle=drawstyle,
                    label="Posterior predictive mean {}".format(pp_var_name))
            ax_i.set_yticks([0, 0.5, 1])

        elif kind == "scatter":
            if mean:
                if dtype == "f":
                    plot_kde(
                        pp_vals.flatten(),
                        plot_kwargs={
                            "color": "C0",
                            "linestyle": "--",
                            "linewidth": linewidth,
                            "zorder": 3,
                        },
                        label="Posterior predictive mean {}".format(
                            pp_var_name),
                        ax=ax_i,
                        legend=legend,
                    )
                else:
                    vals = pp_vals.flatten()
                    bins = get_bins(vals)
                    hist, bin_edges = histogram(vals, bins=bins)
                    hist = np.concatenate((hist[:1], hist))
                    ax_i.plot(
                        bin_edges,
                        hist,
                        color="C0",
                        linewidth=linewidth,
                        label="Posterior predictive mean {}".format(
                            pp_var_name),
                        zorder=3,
                        linestyle="--",
                        drawstyle="steps-pre",
                    )

            # lay the posterior predictive draws out in evenly spaced horizontal rows above
            # the observed values; `jitter` controls the vertical spread within each row
            _, limit = ax_i.get_ylim()
            limit *= 1.05
            y_rows = np.linspace(0, limit, num_pp_samples + 1)
            jitter_scale = y_rows[1] - y_rows[0]
            scale_low = 0
            scale_high = jitter_scale * jitter

            obs_yvals = np.zeros_like(obs_vals, dtype=np.float64)
            if jitter:
                obs_yvals += np.random.uniform(low=scale_low,
                                               high=scale_high,
                                               size=len(obs_vals))
            ax_i.plot(
                obs_vals,
                obs_yvals,
                "o",
                color="C0",
                markersize=markersize,
                alpha=alpha,
                label="Observed {}".format(var_name),
                zorder=4,
            )

            if animated:
                animate, init = _set_animation(
                    pp_sampled_vals,
                    ax_i,
                    kind=kind,
                    height=y_rows.mean() * 0.5,
                    markersize=markersize,
                )

            else:
                for vals, y in zip(pp_sampled_vals, y_rows[1:]):
                    vals = np.ravel(vals)
                    yvals = np.full_like(vals, y, dtype=np.float64)
                    if jitter:
                        yvals += np.random.uniform(low=scale_low,
                                                   high=scale_high,
                                                   size=len(vals))
                    ax_i.plot(vals,
                              yvals,
                              "o",
                              zorder=2,
                              color="C5",
                              markersize=markersize,
                              alpha=alpha)

            ax_i.plot([],
                      "C5o",
                      label="Posterior predictive {}".format(pp_var_name))

            ax_i.set_yticks([])

        if var_name != pp_var_name:
            xlabel = "{} / {}".format(var_name, pp_var_name)
        else:
            xlabel = var_name
        ax_i.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize)

        if legend:
            if i == 0:
                ax_i.legend(fontsize=xt_labelsize * 0.75)
            else:
                ax_i.legend([])

    if animated:
        ani = animation.FuncAnimation(fig,
                                      animate,
                                      np.arange(0, num_pp_samples),
                                      init_func=init,
                                      **animation_kwargs)
        return axes, ani
    else:
        return axes
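
The body above appears to be ArviZ's matplotlib plot_ppc implementation, where np.full_like builds the constant row of y values for each posterior predictive draw in the scatter panel. Below is a minimal usage sketch, assuming the public arviz API and one of its bundled example datasets; the dataset name "radon" is an assumption and not taken from the snippet above.

# Hedged usage sketch: assumes arviz is installed and exposes plot_ppc as shown.
import arviz as az
import matplotlib.pyplot as plt

idata = az.load_arviz_data("radon")                  # example dataset with a posterior_predictive group
az.plot_ppc(idata, kind="kde", num_pp_samples=30)    # KDE overlays of draws vs. observed data
az.plot_ppc(idata, kind="cumulative", mean=True)     # empirical CDF comparison
az.plot_ppc(idata, kind="scatter", jitter=0.7)       # rows of draws laid out with np.full_like
plt.show()
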
Ejemplo n.º 59
0
# Imports assumed by this snippet (not shown in the original); `mk` is presumably
# the pymannkendall package, which provides seasonal_test.
import os

import numpy as np
import pandas as pd
import pymannkendall as mk


def par_trend(n, input_param):
    """
    :param n:               int: parallel index
    :param input_param:     type(dict):
                            {'dt':          time-date array
                             'path2data':   path where to read data series for which calculating the trend
                             'path2slopes': path where to save the calculated trend infos
                             'data_mask':   mask array, set the index of valid pixels (not-nan)
                             'head':        head defining the name of temporary data to read
                             'step':        used to define the index of the matrix-chunk relative to the current loop
                             'nloops':      parallel loop index number
                             'dbg':         enables debug mode
                             'fid':         object of kind open(filename) points to the log_file where to write
                                            debug output
                             'frequency'    frequency of the timeseries, i.e. how many observations per year
    :return:

        Save a npy temporary file into the save_path directory for each loop of parallel cycle


    """
    dt = input_param['dt']
    d_path = input_param['path2data']
    s_path = input_param['path2slopes']
    head = input_param['head']
    wm = input_param['data_mask']
    step = input_param['step']
    nloops = input_param['nloops']
    dbg = input_param['dbg']
    fid = input_param['fid']
    frequency = input_param['frequency']
    threshold = input_param['threshold']

    if dbg:
        fid.writelines('start parallel loop index ' + str(n) + '\n')

    sl_name = s_path + head + '-' + str(n).zfill(2) + '.npy'
    if not os.path.exists(sl_name):
        i0 = step * n
        i1 = (n + 1) * step
        if n + 1 == nloops:
            i1 = None
        # reading temporary data chunk as saved in previous step
        if dbg:
            fid.writelines('reading chunk ' + str(n) + '\n')
        data = np.load(d_path + head + '-' + str(n).zfill(2) + '.npy')
        wm = wm[i0:i1]
        ind_good = np.where(wm != 0)

        slopes = np.full_like(wm, fill_value=np.nan)
        interc = np.full_like(wm, fill_value=np.nan)
        pvalue = np.full_like(wm, fill_value=np.nan)

        for k in ind_good[0]:
            d = data[k, :]
            ts = pd.Series(d, index=pd.to_datetime(dt))

            trend_out = mk.seasonal_test(ts, period=frequency, alpha=threshold)

            slopes[k] = trend_out.slope / frequency
            interc[k] = trend_out.intercept
            pvalue[k] = trend_out.p

        np.save(sl_name, [slopes, interc, pvalue])

        if dbg:
            fid.writelines('end parallel loop index ' + str(n) + '\n')
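
A hedged driver sketch for par_trend follows. Every path, file prefix, and array below is a made-up placeholder, the parallel backend (joblib) is an assumption, and the chunked .npy input files named path2data + head + '-NN.npy' are expected to exist already from an earlier step.

# Hypothetical driver for par_trend; all names and paths are placeholders.
import numpy as np
import pandas as pd
from joblib import Parallel, delayed   # assumed parallel backend, not shown in the original

nloops = 8
dates = pd.date_range("2000-01-01", periods=240, freq="M")   # 20 years of monthly data
mask = np.ones(1000, dtype=np.uint8)                         # 1 = valid pixel

input_param = {
    'dt': dates,
    'path2data': '/tmp/chunks/',        # placeholder path
    'path2slopes': '/tmp/slopes/',      # placeholder path
    'data_mask': mask,
    'head': 'ndvi',                     # placeholder file prefix
    'step': mask.size // nloops,
    'nloops': nloops,
    'dbg': False,
    'fid': None,                        # unused because dbg is False
    'frequency': 12,                    # monthly observations
    'threshold': 0.05,
}

Parallel(n_jobs=4)(delayed(par_trend)(n, input_param) for n in range(nloops))
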
Ejemplo n.º 60
0
# Import and template array assumed by this snippet (neither is defined in the original):
# `b` is only a placeholder used as a shape/dtype template for full_like and full below.
import numpy as np

b = np.array([[1, 2, 3], [4, 5, 6]])  # assumed placeholder array

d = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
print(d)
print(d[0, :, 0])
print('replace')
d[:, 0, :] = [[0, 0], [0, 0]]
print(d)

print('Different types of Arrays:')
print('All 0s matrix')
print(np.zeros((2, 3, 3)))
print('All ones')
print(np.ones((3, 3)))
print('Any other values:')
print(np.full((4, 2), 55))
print('Any other values (full_like):')
print(np.full_like(b, 44))
print(' or:')
print(np.full(b.shape, 33, b.dtype))
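# Note: np.full_like(b, 44) is shorthand for np.full(b.shape, 44, dtype=b.dtype);
# full_like copies both the shape and the dtype of the template array `b`.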

print('Random decimal (fractional) numbers:')
print(np.random.rand(3, 4))
print(np.random.random_sample(b.shape))
print('Random integer numbers:')
print(np.random.randint(5, 100, size=(3, 4)))
print(np.random.randint(10, size=(10, 5)))

print('The identity matrix (unit matrix)')
print(np.identity(5))

print('repeat an array')
arr = np.array([[1, 2, 3]])