def svm_model(train_data_features, train_data_cross_validation_classwise_features, test_data_features, labels, labels_cross_validation_classwise, using_cross_validation2, kf, settings):
    if using_cross_validation2:
        C_base = 4.5
        C_step = 0.5#0.005
        C = C_base
        _results = []
        if(len(train_data_cross_validation_classwise_features) > 0):
            """train_all = np.append(train_data_features, train_data_cross_validation_classwise_features, axis=0)
            labels_all = np.append(labels, labels_cross_validation_classwise)
            kf_all = KFold(len(train_all)-1, n_folds=int(settings['Data']['CrossValidation2']), shuffle=True)
            for train, test in kf_all:
                svc = SVC(kernel="linear", C=C, probability=True)
                model = svc.fit(train_all[train], labels_all[train])
                predicted_classes = model.predict(train_all[test])
                predicted_classes_train = model.predict(train_all[train])
                class_probabilities = model.predict_proba(train_all[test])
                print("C: ",C," n points:", len(predicted_classes), " percentage: ",(labels_all[test] != predicted_classes).sum()*100/len(predicted_classes),"% percentage_train: ", (labels_all[train] != predicted_classes_train).sum()*100/len(predicted_classes_train),"%")
                _results.append((labels_all[test] != predicted_classes).sum())
                C += C_step"""
            for c in pl.frange(C_base,9, C_step):
                svc = SVC(kernel="linear", C=c, probability=True)
                model = svc.fit(train_data_features, labels)
                predicted_classes = model.predict(train_data_cross_validation_classwise_features)
                class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
                print("C: ",c," N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%")
                print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
            for c in pl.frange(1,3, 1):
                svc = SVC(kernel="linear", C=c, probability=True)
                model = svc.fit(train_data_features, labels)
                predicted_classes = model.predict(train_data_cross_validation_classwise_features)
                class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
                print("C: ",c," N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%")
                print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        else:
            for train, test in kf:
                svc = SVC(kernel="linear", C=C, probability=True)
                model = svc.fit(train_data_features[train], labels[train])
                predicted_classes = model.predict(train_data_features[test])
                predicted_classes_train = model.predict(train_data_features[train])
                class_probabilities = model.predict_proba(train_data_features[test])
                print("C: ",C," n points:", len(predicted_classes), " percentage: ",(labels[test] != predicted_classes).sum()*100/len(predicted_classes),"% percentage_train: ", (labels[train] != predicted_classes_train).sum()*100/len(predicted_classes_train),"%")
                _results.append((labels[test] != predicted_classes).sum())
                C += C_step

        if _results:  # only the k-fold branch fills _results; otherwise C stays at C_base
            C = C_base + C_step * _results.index(min(_results))
        print("C: ", C)
        if(len(train_data_cross_validation_classwise_features) > 0):
            svc = SVC(kernel="linear", C=C, probability=True)
            model = svc.fit(train_data_features, labels)
            predicted_classes = model.predict(train_data_cross_validation_classwise_features)
            class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
            print("C: ",C," N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%")
            print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        svc = SVC(kernel="linear", C=C, probability=True)
        model = svc.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
    else:
        svc = SVC(kernel="linear", C=8, probability=True)
        model = svc.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
def simulate(lat, lon, wind, date, time):
    global mytopic
    max_lat = lat + 1.0000
    min_lat = lat - 1.0000
    max_lon = lon + 1.0000
    min_lon = lon - 1.0000
    # change when higher resolution wanted
    lat_interval = 0.05
    lon_interval = 0.05
    lat_range = pl.frange(min_lat, max_lat, lat_interval)
    lon_range = pl.frange(min_lon, max_lon, lon_interval)
    # if lat in lat_range:
    # 	lat_range.remove(lat)
    # if lon in lon_range:
    # 	lon_range.remove(lon)
    for i in lat_range:
        for j in lon_range:
            # create random num between -1 and 1
            # wind_i = wind + wind * num
            gust = random.uniform(-1, 1)
            wind_i = wind + (wind * gust)
            if i != lat or j != lon:  # skip only the exact center point; 'is' identity checks are unreliable for floats
                data = (str(
                    str(lon) + ',' + str(lat) + ',' + str(int(wind_i)) + ',' +
                    date + ',' + time))  # send the per-point gusted wind rather than the unmodified input
                producer.send(mytopic, data)
                print('wind_sim: ', wind_i)
Example #3
def arhex_joint_positions_publisher(pub1,pub2,pub3,pub4,pub5,pub6):

    #Initiate node for controlling joint1 and joint2 positions.


    #Define publishers for each joint position controller commands.
    rate = rospy.Rate(100)
    #While loop to have joints follow a certain position, while rospy is not shutdown.
    i = 0
    #stand = 0.01
    #support = angles.pi*40/180
    stand = angles.pi*90/180
    while not rospy.is_shutdown():
        #msg = rospy.wait_for_message('/arhex/joint_states', JointState)
        #stand = angles.pi*90/180
        for i in pl.frange(0,45,1):

            support = angles.pi*i/180
            pub1.publish(support)
            rate.sleep()

        for i in pl.frange(45.3,135,0.333):

            support = angles.pi*i/180
            pub1.publish(support)
            rate.sleep()

        for i in pl.frange(135,360,1):

            support = angles.pi*i/180
            pub1.publish(support)
            rate.sleep()
def getPlane(
    point, normal, radiusX, radiusY, Nx, Ny
):  #equation of plane is a*x+b*y+c*z+d=0  [a,b,c] is the normal. Thus, we have to calculate d and we're set
    d = -1 * numpy.dot(point, normal)
    minIndex = normal.index(min(normal))
    n1 = -normal[(minIndex + 1) % 3]
    n2 = normal[(minIndex + 2) % 3]
    #u = ([0, n2, n1])/numpy.linalg.norm([0,n1,n2]) #This is the vector inside the plane, perp. to the normal vector
    if normal[0] == 0 and normal[1] == 0:
        u = [0, 1, 0]
    else:
        u = [-normal[1], normal[0], 0]
    v = numpy.cross(u, normal)

    u = numpy.array(u) / numpy.linalg.norm(u)
    v = numpy.array(v) / numpy.linalg.norm(v)

    points_in_plane = []

    deltaX = radiusX / Nx
    epsilonX = deltaX * 0.5
    deltaY = radiusY / Ny
    epsilonY = deltaY * 0.5

    for y in pylab.frange(
            -radiusY, radiusY + epsilonY, deltaY
    ):  #Epsilon makes sure point count is symmetric and we don't miss points on extremes
        for x in pylab.frange(-radiusX, radiusX + epsilonX, deltaX):
            points_in_plane.append(point + x * u + y * v)

    return points_in_plane
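
# A minimal usage sketch (hypothetical values, not part of the original
# snippet): sample a 2x2 patch of the z = 0 plane around the origin. `normal`
# must be a plain list (the function calls normal.index) and `point` a numpy
# array so that point + x * u + y * v broadcasts.
center = numpy.array([0.0, 0.0, 0.0])
pts = getPlane(center, [0, 0, 1], radiusX=1.0, radiusY=1.0, Nx=10, Ny=10)
print(len(pts))  # expect (2*Nx + 1) * (2*Ny + 1) = 441 grid points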
Example #5
def optimal_block_sim_threshold_min_block_dist_similarity(exe_name_1,
                                                          exe_name_2,
                                                          num_of_funcs):
    func_set = Function.objects.exclude(graph__num_of_blocks=1)
    exe1, exe2 = get_intersecting_func_names(func_set, exe_name_1,
                                             exe_name_2)
    index_list = random.sample(range(len(exe1)), num_of_funcs)
    funcs1 = [exe1[i] for i in index_list]
    funcs2 = [exe2[i] for i in index_list]
    best_block_sim_threshold = 0
    best_min_block_dist_similarity = 0
    best_delta = float("-infinity")
    for block_sim_threshold in pl.frange(0, 0.8, 0.1):
        for min_block_dist_similarity in pl.frange(0.5, 0.8, 0.1):
            print ("current", block_sim_threshold, min_block_dist_similarity)
            print ("best", best_block_sim_threshold, best_min_block_dist_similarity)
            test_dict = {  # "log_decisions": True,
                         "block_similarity_threshold": block_sim_threshold,
                         "min_block_dist_similarity": min_block_dist_similarity,
                         "association_graph_max_size": 5000}
            delta = \
                get_optimal_threshold(funcs1, funcs2, test_dict=test_dict)

            if best_delta < delta:
                best_delta = delta
                print "best delta: " + str(best_delta)
                best_block_sim_threshold = block_sim_threshold
                best_min_block_dist_similarity = min_block_dist_similarity

    print ("best_delta: " +
           str(best_delta) +
           ", best_block_sim_threshold: " +
           str(best_block_sim_threshold) +
           ", best_min_block_dist_similarity: " +
           str(best_min_block_dist_similarity))
Example #6
def ZapisDoPliku(plik):
    plik.write("t[s]\tv[m/s]\ts[m]\n")
    s1 = []
    for i in pylab.frange(0.000, Czas_Reakcji, 0.001):
        Droga = float(Predkosc * i)
        s1.append(Droga)  # append the result to the end of the list
        Predkosc_Chwilowa = float(Predkosc)
        bufor = str("{0:.3f}".format(i))  # convert the time value i to a string, since write() requires string input
        plik.write(bufor)  # write the time value to the file
        plik.write("\t")  # tab separator
        bufor2 = str("{0:.3f}".format(Predkosc_Chwilowa))  # convert the instantaneous velocity to a string for write()
        plik.write(bufor2)  # write the velocity value to the file
        plik.write("\t")  # tab separator
        bufor3 = str("{0:.3f}".format(Droga))  # convert the distance to a string for write()
        plik.write(bufor3)  # write the distance value to the file
        plik.write('\n')  # move to a new line in the file
        if (i >= Czas_Reakcji): Zapamietana = Droga  # >= rather than == to be robust to float rounding in frange

    for i in pylab.frange(0.001, Czas, 0.001):
        Droga = float((Predkosc * i) - (Przyspieszenie * i ** 2) / 2 + Zapamietana)
        Predkosc_Chwilowa = Predkosc - (i * Przyspieszenie)
        s1.append(Droga)  # append the result to the end of the list
        bufor4 = str("{0:.3f}".format(i + Czas_Reakcji))  # convert the time value to a string for write()
        plik.write(bufor4)  # write the time value to the file
        plik.write("\t")  # tab separator
        bufor5 = str("{0:.3f}".format(Predkosc_Chwilowa))  # convert the instantaneous velocity to a string for write()
        plik.write(bufor5)  # write the velocity value to the file
        plik.write("\t")  # tab separator
        bufor6 = str("{0:.3f}".format(Droga + Zapamietana))  # convert the distance to a string for write()
        plik.write(bufor6)  # write the distance value to the file
        plik.write('\n')  # move to a new line in the file
Example #7
def wykres1(CzyPrzykladowe):
    # Compute the values
    t = pylab.frange(0.000, Czascal, 0.001)  # x-axis arguments over <0; total time>; pylab's frange handles floats
    s1 = []  # list of computed values, filled in below
    if(CzyPrzykladowe == 0):
        s2 = []
        s3 = []
        s4 = []
        s5 = []  # for the other friction coefficients

    for i in pylab.frange(0.000, Czas_Reakcji, 0.001):
        Droga = float(Predkosc * i)
        s1.append(Droga)  # append the result to the end of the list
        if (CzyPrzykladowe == 0):
            s2.append(Droga)
            s3.append(Droga)
            s4.append(Droga)
            s5.append(Droga)
        if(i >= Czas_Reakcji): Zapamietana = Droga

    for i in pylab.frange(0.001, Czas, 0.001):
        LimDroga = float((Predkosc * i) - (a4 * i ** 2) / 2 + Zapamietana)
        Droga = float((Predkosc * i) - (Przyspieszenie * i ** 2) / 2 + Zapamietana)
        s1.append(Droga)  # append the result to the end of the list
        if (CzyPrzykladowe == 0):
            Drogaa = float((Predkosc * i) - (a1 * i ** 2) / 2 + Zapamietana)
            if (i <= t1): s2.append(Drogaa)
            else: s2.append(dr1)  # guards added so negative distance and velocity are neither computed nor plotted
            Drogaa = float((Predkosc * i) - (a2 * i ** 2) / 2 + Zapamietana)
            if (i <= t2): s3.append(Drogaa)
            else: s3.append(dr2)
            Drogaa = float((Predkosc * i) - (a3 * i ** 2) / 2 + Zapamietana)
            if (i <= t3): s4.append(Drogaa)
            else: s4.append(dr3)
            Drogaa = float((Predkosc * i) - (a4 * i ** 2) / 2 + Zapamietana)
            if (i <= t4): s5.append(Drogaa)
            else: s5.append(dr4)

# Distance plot
    pylab.figure()  # create a separate figure instance for the plot
    pylab.plot(t, s1)  # feed the data to the plot
    pylab.text(ZwrocCzasCal() / 10, LimDroga - 2 * (LimDroga/8), ' '.join(['Ft=', str(Tarcie)]), fontsize=10, color='#1009f2')  # (x position, y position, label, font size, color)
    if (CzyPrzykladowe == 0):
        pylab.plot(t, s2)
        pylab.text(ZwrocCzasCal()/10, LimDroga - 3*(LimDroga/8), 'Ft=0.9', fontsize=10, color='#2f7013')
        pylab.plot(t, s3)
        pylab.text(ZwrocCzasCal()/10, LimDroga - 4*(LimDroga/8), 'Ft=0.6', fontsize=10, color='#c72923')
        pylab.plot(t, s4)
        pylab.text(ZwrocCzasCal()/10, LimDroga - 5*(LimDroga/8), 'Ft=0.3', fontsize=10, color='#4dd3ed')
        pylab.plot(t, s5)
        pylab.text(ZwrocCzasCal()/10, LimDroga - 6*(LimDroga/8), 'Ft=0.05', fontsize=10, color='#b56afe')
    pylab.ylabel('Droga [m]', fontsize=10)  # y-axis label (distance)
    pylab.xlabel('Czas [s]', fontsize=10)  # x-axis label (time)
    pylab.title('Zaleznosc s[t] w trakcie hamowania', fontsize=11)  # plot title: s(t) during braking
    pylab.grid(True)  # enable the grid
    pylab.savefig('S_od_t.png', dpi=300)  # save the plot to a file automatically
    fig = pylab.gcf()
    fig.canvas.set_window_title(u'Zalezność s[t] w trakcie hamowania')  # title of the plot window
    pylab.show()
Example #8
def timings(exe_name_1, exe_name_2, num_of_funcs):
    exe1, exe2 = get_intersecting_func_names(exe_name_1, exe_name_2)

    index_list = random.sample(range(len(exe1)), num_of_funcs)
    funcs1 = [exe1[i] for i in index_list]
    funcs2 = [exe2[i] for i in index_list]

    bst = []
    timing_dict = {}
    for block_sim_threshold in pl.frange(0, 0.8, 0.1):
        block_sim_threshold = round(block_sim_threshold, 1)
        timing_dict[block_sim_threshold] = {}  # create the inner dict before indexing into it below
        mbds = []
        for min_block_dist_similarity in pl.frange(0, 0.8, 0.1):
            min_block_dist_similarity = round(min_block_dist_similarity, 1)
            test_dict = {  # "log_decisions": True,
                         "block_similarity_threshold": block_sim_threshold,
                         "min_block_dist_similarity": min_block_dist_similarity,
                         "association_graph_max_size": 5000}
            start = time.time()
            delta = get_optimal_threshold(funcs1, funcs2, test_dict=test_dict)
            elapsed = (time.time() - start)
            mbds.append(elapsed)
            print (block_sim_threshold, min_block_dist_similarity, elapsed)
            timing_dict[block_sim_threshold][min_block_dist_similarity] = (delta, elapsed)
            print(elapsed)
        bst.append(mbds)
    return timing_dict
def getPlane(
    point, normal, radiusX, radiusY, Nx, Ny
):  #Creates plane by expressing plane as linear combination of two vectors orthogonal to normal vector
    d = -1 * numpy.dot(point, normal)
    minIndex = normal.index(min(normal))
    n1 = -normal[(minIndex + 1) % 3]
    n2 = normal[(minIndex + 2) % 3]
    #u = ([0, n2, n1])/numpy.linalg.norm([0,n1,n2]) #This is the vector inside the plane, perp. to the normal vector
    if normal[0] == 0 and normal[1] == 0:
        u = [0, 1, 0]
    else:
        u = [-normal[1], normal[0], 0]
    v = numpy.cross(u, normal)

    u = numpy.array(u) / numpy.linalg.norm(
        u)  #Normalized vector orthogonal to normal
    v = numpy.array(v) / numpy.linalg.norm(
        v)  #Second normalized vector orthogonal to normal

    points_in_plane = []

    deltaX = radiusX / Nx
    epsilonX = deltaX * 0.5
    deltaY = radiusY / Ny
    epsilonY = deltaY * 0.5

    for y in pylab.frange(-radiusY, radiusY, deltaY):
        for x in pylab.frange(-radiusX, radiusX, deltaX):
            points_in_plane.append(point + x * u + y * v)
    return points_in_plane  #all points in grid
def generate_plot():
	h_bar = 6.582E-16
	q = 1
	a = 1E-10
	t = 1
	c = 3.0E8
	g = -2.002
	N = 1
	E = -1
	Ez = 1000
	eta = 0.01 + (0.01)*1.j
	sigma_x = np.array([[0,1],[1,0]])
	sigma_y = np.array([[0, -1.j],[1.j,0]])
	kxs = []
	alphas = []
	stxs = []
	stys = []
	for kx in pl.frange(0, 2*np.pi, 0.1):
		kxs.append(kx)
		kys = []
		alphas_row = []
		stxs_row = []
		stys_row = []
		for ky in pl.frange(0, 2*np.pi, 0.1):
			coeff = (-1)*g*q*(1/(h_bar**2))*(a**2)*(t**2)*(1/(2*c**2))
			#print(coeff)
			hamil = sparse.kron(np.identity(2, dtype=np.complex_), t*(np.cos(kx)+np.cos(ky)))
			hamil += coeff*(np.cos(kx) + np.cos(ky))*(Ez*np.sin(ky)*sigma_x - Ez*np.sin(kx)*sigma_y)
			E_arr = sparse.kron(np.identity(2, dtype=np.complex_),E).toarray()
			greens = linalg.inv(E_arr-hamil-eta)
			img = (greens - calc.hermitian(greens))/(2.j)
			stxs_row.append(np.trace(np.dot(img,sigma_x))/2)
			stys_row.append(np.trace(np.dot(img,sigma_y))/2)
			kys.append(ky)
			alpha = np.trace(img)/2
			alphas_row.append(alpha)
		#print(stxs_row)
		alphas.append(alphas_row)
		stxs.append(stxs_row)
		stys.append(stys_row)
		print(kx)
	print('loop over')	
	x, y = np.meshgrid(kxs, kys)
	print('here')
	#print(alphas)
	alphas = np.array(alphas)
	stxs = np.array(stxs)
	stys = np.array(stys)
	print(stxs)
	#print(alphas)
	#fig = plt.figure()
	plt.pcolormesh(x, y, alphas)
	#plt.pcolormesh(x,y,stxs)
	plt.quiver(x, y, stxs, stys, color='red', angles='xy', scale_units='xy', scale=1)
	#plt.quiver(x, y, stys, color='red', headlength=10)
	print('mesh complete')
	#plt.colorbar()
	plt.show()
def _calc_scales():
    raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
    min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),
                    np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))
    max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
    scales_down = pl.frange(min_scale, 0, 1.)
    scales_up = pl.frange(0.5, max_scale, 0.5)
    scales_pow = np.hstack((scales_down, scales_up))
    scales = np.power(2.0, scales_pow)
    return scales
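
# A worked example of the formula above with hypothetical numbers: for a
# 500x375 image with MAX_INPUT_DIM = 5000 and clusters no larger than the
# image, min_scale could come out as -2.0 while max_scale = min(1.0,
# -log2(500/5000)) = 1.0, so scales_pow = [-2, -1, 0, 0.5, 1] and
# scales = 2**scales_pow = [0.25, 0.5, 1.0, ~1.41, 2.0].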
Example #12
def wykres3(CzyPrzykladowe):
    vv = pylab.frange(0.00, 60, 0.01)
    ss = []
    ss1 = []
    ss2 = []
    ss3 = []
    ss4 = []
    for i in pylab.frange(0.00, 60, 0.01):  # 3rd plot
        Czazz = i / (Tarcie * g)
        if (CzyPrzykladowe == 0):
            Czazz1 = i / (0.9 * g)
            Czazz2 = i / (0.6 * g)
            Czazz3 = i / (0.3 * g)
            Czazz4 = i / (0.05 * g)
        Drogaaa = float((Czazz * i) - (Tarcie * Czazz**2) / 2)
        ss.append(Drogaaa)
        if (CzyPrzykladowe == 0):
            Drogaaa = float((Czazz1 * i) - (0.9 * Czazz1**2) / 2)
            ss1.append(Drogaaa)
            Drogaaa = float((Czazz2 * i) - (0.6 * Czazz2**2) / 2)
            ss2.append(Drogaaa)
            Drogaaa = float((Czazz3 * i) - (0.3 * Czazz3**2) / 2)
            ss3.append(Drogaaa)
            Drogaaa = float((Czazz4 * i) - (0.05 * Czazz4**2) / 2)
            ss4.append(Drogaaa)
    pylab.figure()
    pylab.plot(vv, ss)
    pylab.text(5,
               900,
               ' '.join(['Ft=', str(Tarcie)]),
               fontsize=11,
               color='#1009f2')
    if (CzyPrzykladowe == 0):
        pylab.text(5, 850, 'Ft=0.9', fontsize=11, color='#2f7013')
        pylab.text(5, 800, 'Ft=0.6', fontsize=11, color='#c72923')
        pylab.text(5, 750, 'Ft=0.3', fontsize=11, color='#4dd3ed')
        pylab.text(5, 700, 'Ft=0.05', fontsize=11, color='#b56afe')
        pylab.plot(vv, ss1)
        pylab.plot(vv, ss2)
        pylab.plot(vv, ss3)
        pylab.plot(vv, ss4)
    pylab.ylabel('Droga [m]', fontsize=10)
    pylab.xlabel('Predkosc [m/s]', fontsize=10)
    pylab.title('Zaleznosc drogi hamowania od predkosci poczatkowej',
                fontsize=11)  # plot title: braking distance vs. initial speed
    pylab.grid(True)
    pylab.xlim([0, 62])
    pylab.ylim([0, ((20 * 60) - (Tarcie * 20**2) / 2) + 2])
    pylab.savefig('Droga_hamowania_od_pred_pocz.png',
                  dpi=300)  # save the plot to a file automatically
    fig = pylab.gcf()
    fig.canvas.set_window_title(
        u'Zalezność drogi hamowania od prędkości początkowej'
    )  # title of the plot window
    pylab.show()  # show the plot
Example #13
def main():
    skip = False
    winSize = (64,128)
    blockSize = (16,16)
    blockStride = (8,8)
    cellSize = (8,8)
    nbins = 9
    derivAperture = 1
    winSigma = -1.
    histogramNormType = 0
    L2HysThreshold = 0.2
    gammaCorrection = 1
    nlevels = 64
    signedGradients = False
    # Initialize the HOG descriptor
    #hog = cv2.HOGDescriptor()
    hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma, histogramNormType,L2HysThreshold,gammaCorrection,nlevels,signedGradients)
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    s = ""
    for i in pl.frange(1.05,1.5,0.05):
        s += ';'
        s += str(round(i,2))
    s += '\n'        
        
    # Load the image in grayscale and rescale it
    image = cv2.imread(settings.img_path, cv2.IMREAD_GRAYSCALE)
    
    for i in pl.frange(0.3,0.8,0.05):
        s += str(round(i,2))
        for j in pl.frange(1.05,1.5,0.05):
            counter = 0
            start = time.time()
            #for k in range(0,30):
            while((time.time() - start) < 1):
                # Skip every other frame
                if skip == False:
                    imagetemp = cv2.resize(image,None,fx=i, fy=i, interpolation = cv2.INTER_CUBIC)

                    # Compute the HOG and run the SVM detection, returning the bounding boxes of the matches
                    g = HogDescriptor(imagetemp,hog,j)
                skip = not skip
                counter += 1
                
            t = time.time() - start
            #FPS = t/30
            FPS = counter / (time.time() - start)         
            s += ";"
            s += str(round(FPS,2))
        s += '\n'
    f = open('csvfile2.txt','w')
    f.write(s.replace('.',','))
    f.close()
Example #14
def main():
    global msb_1_max_delay
    global msb_2_max_delay
    global msb_3_max_delay
    global msb_4_max_delay
    global apx_optimal_mode

    for apx_optimal_mode_el in apx_optimal_mode_l:
        print "-----------------"
        temp = list(apx_optimal_mode_el)
        apx_optimal_mode[0] = int(temp[0])
        apx_optimal_mode[1] = int(temp[1])
        apx_optimal_mode[2] = int(temp[2])
        apx_optimal_mode[3] = int(temp[3])


        for msb_1_max_delay_el in pylab.frange(msb_1_max_delay_optimal - .1,\
                msb_1_max_delay_optimal + .005, .005):
            msb_1_max_delay = msb_1_max_delay_el
            for msb_2_max_delay_el in pylab.frange(msb_2_max_delay_optimal - \
                    .1, msb_2_max_delay_optimal + .005, .005):
                if (apx_optimal_mode[1]
                        == 0) and not (apx_optimal_mode[0] == 0):
                    run_tool_chain()
                    break
                msb_2_max_delay = msb_2_max_delay_el
                for msb_3_max_delay_el in pylab.frange(msb_3_max_delay_optimal \
                        - .1, msb_3_max_delay_optimal + .008, .005):
                    if (apx_optimal_mode[2]
                            == 0) and not (apx_optimal_mode[1] == 0):
                        run_tool_chain()
                        break
                    msb_3_max_delay = msb_3_max_delay_el
                    for msb_4_max_delay_el in \
                    pylab.frange(msb_4_max_delay_optimal - .1, \
                            msb_4_max_delay_optimal + .005, .005):
                        if (apx_optimal_mode[3]
                                == 0) and not (apx_optimal_mode[2] == 0):
                            run_tool_chain()
                            break
                        msb_4_max_delay = msb_4_max_delay_el
                        run_tool_chain()
                        if (apx_optimal_mode[3] == 0):
                            break
                    if (apx_optimal_mode[2] == 0):
                        break
                if (apx_optimal_mode[1] == 0):
                    break
            if (apx_optimal_mode[0] == 0):
                break
def _calc_scales():
    """Calculate the different scales that will be applied to the images."""
    raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
    min_scale = min(
        np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),
        np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))
    max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
    scales_down = pl.frange(min_scale, 0, 1.)
    scales_up = pl.frange(0.5, max_scale, 0.5)
    scales_pow = np.hstack((scales_down, scales_up))
    scales = np.power(2.0, scales_pow)
    return scales
Example #16
def _calc_scales():
    """
    Compute the different scales for detection
    :return: [2^X] with X depending on the input image
    """
    raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
    min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),
                    np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))
    max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
    scales_down = pl.frange(min_scale, 0, 1.)
    scales_up = pl.frange(0.5, max_scale, 0.5)
    scales_pow = np.hstack((scales_down, scales_up))
    scales = np.power(2.0, scales_pow)
    return scales
Example #17
def simulateFreeFall(mass, simulationTime, deltaT):
    deltaVel = 0  # initial velocity should be 0?
    deltaD = 0  # change in distance
    accel = 9.81  #m/s^2, constant
    elapsedTime = []
    length = []
    velocity = []
    acceleration = []

    for t in pl.frange(0, simulationTime, deltaT):
        #print t
        # t can be considered elapsedTime
        elapsedTime.append(t)

        deltaD += deltaVel * deltaT
        length.append(deltaD)

        deltaVel += accel * deltaT
        velocity.append(deltaVel)

        acceleration.append(accel)

    #print elapsedTime
    #print length
    #print velocity

    return elapsedTime, length, velocity, acceleration
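
# A minimal usage sketch (hypothetical values): 2 s of free fall at 0.01 s
# steps; with this explicit Euler update the final distance approaches the
# analytic 0.5 * g * t^2 as deltaT shrinks.
elapsedTime, length, velocity, _ = simulateFreeFall(1.0, 2.0, 0.01)
print(length[-1], 0.5 * 9.81 * elapsedTime[-1] ** 2)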
Example #18
def plot_averages(file_name, n, nb_iterations=10):
    p_values, deg_values, c_values, d_values = [], [], [], []

    deg_ana, clus_ana, d_ana = [], [], []
    if n != 3:
        deg_ana, clus_ana, d_ana = None, None, None

    for p in pl.frange(0.00, 1, 0.05):
        p_values.append(p)

        if n == 3:
            degree, clustering, shortest_path = analytics_averages(p)
            deg_ana.append(degree)
            clus_ana.append(clustering)
            d_ana.append(shortest_path)

        degree, clustering, shortest_path = 0, 0, 0
        for (deg, clus, sp) in get_averages(n, p, nb_iterations):
            degree += deg
            clustering += clus
            shortest_path += sp

        deg_values.append(degree / nb_iterations)
        c_values.append(clustering / nb_iterations)
        d_values.append(shortest_path / nb_iterations)

    plot_average(c_values, file_name + "avg_cls.pdf", p_values,
                 "average clustering coefficient",
                 "Average clustering coefficient against p", clus_ana)

    plot_average(d_values, file_name + "avg_d.pdf", p_values,
                 "Average diameter", "Average diameter against p", d_ana)

    plot_average(deg_values, file_name + "avg_deg.pdf", p_values,
                 "Average degree", "Average degree against p", deg_ana)
def alpha_impurity():
	"""
	Calculate and plot Gilbert Damping for on-site potential randomization at different strengths
	"""
	alphas = []
	strengths = []
	coll = []
	soc = 0.1
	length = 100
	energy = 1
	theta = 0
	randomize = True
	collector = coll
	with open('alpha_vs_impurity_soc0pt1_len100.txt','w') as f:
		for strength in pl.frange(0,0.1,0.05):
			rando_strength = strength
			strengths.append(strength)
			alpha = integrate.quad(inf_rashba_integrand, 0, 2*np.pi, args=(energy,length,soc,theta,randomize,rando_strength,collector),epsabs=1e-4, epsrel=1e-4, limit=50)[0]
			print(coll)
			f.write(str(strength)+' '+str(coll)+'\n')
			avg = np.mean(coll)
			f.write(str(strength)+' '+str(avg)+'\n')
			std = np.std(coll)
			f.write(str(strength)+' '+str(std)+'\n')
			alphas.append(avg)
	fig = plt.figure()
	ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
	ax.plot(strengths, alphas, 'bo', strengths, alphas, 'g')  # xs/ys were undefined here; plot the computed series as dots plus a line, as in the sibling functions
	fig.savefig('alpha_impurity.png')
	plt.show()
Example #20
def main(args):

    ax = pylab.frange(-2, 2, 0.15)
    ay = []
    # for x >= 1   y = x / (x + 2)
    # for x >= 0   y = x^2 / 3
    # for x < 0    y = x / -3

    for x in ax:
        if x >= 1:
            ay.append(x / (x + 2))
        elif x >= 0:
            ay.append(x**2 / 3)
        else:
            ay.append(x / -3)

    for i in range(len(ax)):
        print("x = {:.2f}, y = {:.2f}".format(ax[i], ay[i]))

    pylab.plot(ax, ay)
    pylab.title("Wykres funkcji")
    pylab.grid(True)
    pylab.show()

    return 0
Example #21
def main():
    x_list = [i for i in pl.frange(-20, 20, 0.1)]
    # y_list = [sigmoid_f(x) for x in x_list]
    # show_plot(x_list, y_list)

    y_list = [sigmoid_f(x, 1.1894132348229451) for x in x_list]
    show_plot(x_list, y_list)
Example #22
def main():
    design_name = "conf_int_mac__noFF__general"
    clk_period = .65
    #.55;#.63;#.68;#.7
    DATA_PATH_WIDTH = 32
    CLKGATED_BITWIDTH = 4
    # number of approximate bits
    apx_optimal = 1
    lsb_bits = 3
    msb_min_delay = 0
    #.55;#.59;#.58
    #Pn = 24
    #msb_max_delay__upper_limit = .46;#.57;#.61;#.62;
    #msb_max_delay__lower_limit__delta = .1

    slow_down = .5
    #-----  -----    -----     -----     -----     -----
    msb_max_delay__upper_limit = clk_period / (1 + slow_down)
    #.57;#.61;#.62;
    msb_max_delay__lower_limit = .36
    msb_max_delay__step_size = .01
    #.57;#.61;#.62;
    #-----  -----    -----     -----     -----     -----
    precision_lower_limit = 25
    precision_higher_limit = 28

    msb_max_delay__upper_limit = float(
        "{0:.2f}".format(msb_max_delay__upper_limit))  #up to 2
    for precision__el in range(precision_lower_limit, precision_higher_limit):
        for msb_max_delay__el in pylab.frange(msb_max_delay__lower_limit,\
                msb_max_delay__upper_limit, msb_max_delay__step_size):
            run_tool_chain(design_name, clk_period, DATA_PATH_WIDTH,
                           CLKGATED_BITWIDTH, apx_optimal, lsb_bits,
                           msb_max_delay__el, msb_min_delay, precision__el,
                           slow_down)
Example #23
def FindE(imax):
    cenergy = []
    for i in frange(1, imax):
        iprod, errc = quad(C_i, 0, 1, args=(i))
        L = iprod**(2.) * Energy(i)
        cenergy.append(L)
    return cenergy
Example #24
def coeff(imax):
    values = []
    for i in frange(1, imax):
        iprod, err = quad(C_i, 0, 1, args=(i))
        c = iprod**(2.) * i**(2.)
        values.append(c)
    return values
def log_res(train_data_features, train_data_cross_validation_classwise_features, test_data_features, labels, labels_cross_validation_classwise, using_cross_validation2, kf, settings):
    if using_cross_validation2:
        logres_C = 1
        logres_results = []
        if(len(train_data_cross_validation_classwise_features) > 0):
            """train_all = np.append(train_data_features, train_data_cross_validation_classwise_features, axis=0)
            labels_all = np.append(labels, labels_cross_validation_classwise)
            kf_all = KFold(len(train_all)-1, n_folds=int(settings['Data']['CrossValidation2']), shuffle=True)
            for train, test in kf_all:
                C = logres_C
                p = 'l1'
                clf_l1_LR = LogisticRegression(C=C, penalty=p, tol=0.01)
                model = clf_l1_LR.fit(train_all[train], labels_all[train])
                predicted_classes = model.predict(train_all[test])
                predicted_classes_train = model.predict(train_all[train])
                print("N points:", len(predicted_classes), " percentage: ",(labels_all[test] != predicted_classes).sum()*100/len(predicted_classes),"%, percentage_train: ", (labels_all[train] != predicted_classes_train).sum()*100/len(predicted_classes_train))
                logres_results.append((labels_all[test] != predicted_classes).sum())
                logres_C += 1"""
            for c in pl.frange(logres_C,15, 1):
                clf_l1_LR = LogisticRegression(C=c, solver='lbfgs', penalty='l2', tol=0.01)
                model = clf_l1_LR.fit(train_data_features, labels)
                predicted_classes = model.predict(train_data_cross_validation_classwise_features)
                predicted_classes_train = model.predict(train_data_features)
                class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
                logres_results.append(log_loss(labels_cross_validation_classwise, class_probabilities))
                print("N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),
                      "%, percentage_train: ", (labels != predicted_classes_train).sum()*100/len(predicted_classes_train))
                print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        else:
            for train, test in kf:
                C = logres_C
                p = 'l1'
                clf_l1_LR = LogisticRegression(C=C, penalty=p, tol=0.01)
                model = clf_l1_LR.fit(train_data_features[train], labels[train])
                predicted_classes = model.predict(train_data_features[test])
                predicted_classes_train = model.predict(train_data_features[train])
                print("N points:", len(predicted_classes), " percentage: ",(labels[test] != predicted_classes).sum()*100/len(predicted_classes),"%, percentage_train: ", (labels[train] != predicted_classes_train).sum()*100/len(predicted_classes_train))
                logres_results.append((labels[test] != predicted_classes).sum())
                logres_C += 1
        print(logres_results)
        logres_C = logres_results.index(min(logres_results)) + 1
        print("Log Res C: ", logres_C)
        if(len(train_data_cross_validation_classwise_features) > 0):
            clf_l1_LR = LogisticRegression(C=logres_C, penalty='l2', tol=0.01)
            model = clf_l1_LR.fit(train_data_features, labels)
            predicted_classes = model.predict(train_data_cross_validation_classwise_features)
            predicted_classes_train = model.predict(train_data_features)
            class_probabilities = model.predict_proba(train_data_cross_validation_classwise_features)
            print("N points:", len(predicted_classes), " percentage: ",(labels_cross_validation_classwise != predicted_classes).sum()*100/len(predicted_classes),"%, percentage_train: ", (labels != predicted_classes_train).sum()*100/len(predicted_classes_train))
            print("Log_loss: ", log_loss(labels_cross_validation_classwise, class_probabilities))
        clf_l1_LR = LogisticRegression(C=logres_C, penalty='l1', tol=0.01)
        model = clf_l1_LR.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
    else:
        C = 1
        p = 'l1'
        clf_l1_LR = LogisticRegression(C=C, penalty=p, tol=0.01)
        model = clf_l1_LR.fit(train_data_features, labels)
        return model.predict_proba(test_data_features), model.predict(test_data_features), model
Example #27
File: wplot.py  Project: antiface/dsp-2
def ax_pianoroll(title='notes'):
    '''Twelve named semitones on the x-axis, one octave.'''
    pl.cla()
    pl.title(title)
    pl.xticks(pl.frange(0, 1, npts=12, closed=0), 'C. C# D. D# E. F. F# G. G# A. A# B.'.split())
    pl.yticks(pl.arange(24))
    pl.grid(True, axis='x')
    pl.xlim(-.5/12,11.5/12)
    pl.autoscale(True,axis='y')
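
# A minimal usage sketch: set up the piano-roll axes, then plot one arbitrary
# value per semitone (the data is purely illustrative).
ax_pianoroll('one octave')
pl.plot(pl.frange(0, 1, npts=12, closed=0), pl.arange(12), 'ko')
pl.show()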
Example #28
def plot_w(dic, name):
    import pylab

    X = pylab.frange(0, len(dic) - 1)
    Y = list(sorted(dic.values(), reverse=True))
    Y = [pylab.log(y) for y in Y]  # list comprehension so this also works on Python 3
    pylab.plot(X, Y)
    #show()
    pylab.savefig(name + '.png')
Example #29
File: wplot.py  Project: tripzilch/dsp
def ax_pianoroll(ax, title='notes'):
    '''Twelve named semitones on the x-axis, one octave.'''
    ax.clear()
    ax.set_title(title)
    ax.set_xticks(pl.frange(0, 1, npts=12, closed=0))
    ax.set_xticklabels('C. C# D. D# E. F. F# G. G# A. A# B.'.split())
    ax.set_yticks(pl.arange(24))
    ax.grid(True, axis='x')
    ax.set_xlim(-.5/12, 11.5/12)
    ax.autoscale(True, axis='y')
Example #30
def plot_dic_cmp(dic, imgname, firstnum):
    import pylab

    X = pylab.frange(0, len(dic) - 1)
    Ys = list(sorted(dic.values(), key=lambda lis:sum(lis), reverse=True))
    for i in range(len(Ys[0])):
        Y = [y[i] for y in Ys]
        pylab.plot(X[:firstnum], Y[:firstnum])
    pylab.savefig(imgname + '_%d.png' % firstnum)
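
# A minimal usage sketch (hypothetical counts): each dict value is a list of
# per-corpus frequencies; plot the first two ranked entries of each series.
plot_dic_cmp({'the': [100, 90], 'of': [60, 70], 'cat': [3, 5]}, 'freq_cmp', 2)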
Example #31
def main():
    common_csv = '/1.csv'
    months = ['03', '04', '05', '06', '07', '08', '09']
    thresholds = list(pl.frange(0.006, 0.009, 0.001))
    for thres in thresholds:
        for month in months:
            path = '/home/marcus/pyalgo/lob_jsons' + folder + month + common_csv
            print("testing with threshold:", thres)
            trade_algo(path, thres)
Example #32
def internal(var, Izz):
    H_1_y, H_1_z, H_2_y, H_2_z, H_3_y, A_1_z = reac.calc_reac_f(Izz)
    if var > 2.691 or var < 0:
        raise ValueError(
            'internal shear and moment module error, x coordinates is out of bounds'
        )

    if var in list(pl.frange(0.0, x_1, step)):
        m = (-q * var**2 / 2)
        v_y = q * var
        v_z = 0
        v_y_pr = v_y * cos(theta) + v_z * sin(theta)
        v_z_pr = -v_y * sin(theta) + v_z * cos(theta)
    elif var in list(pl.frange(x_1, x_2 - x_a / 2, step)):
        m = (H_1_y * var - q * var**2 / 2)
        v_y = q * var - H_1_y
        v_z = H_1_z
        v_y_pr = v_y * cos(theta) + v_z * sin(theta)
        v_z_pr = -v_y * sin(theta) + v_z * cos(theta)
    elif var in list(pl.frange(x_2 - x_a / 2, x_2, step)):
        m = (H_1_y * var - q * var**2 / 2)
        v_y = q * var - H_1_y
        v_z = H_1_z + A_1_z
        v_y_pr = v_y * cos(theta) + v_z * sin(theta)
        v_z_pr = -v_y * sin(theta) + v_z * cos(theta)
    elif var in list(pl.frange(x_2, x_2 + x_a / 2, step)):
        m = (H_2_y * var - q * var**2 / 2)
        v_y = q * var - H_1_y - H_2_y
        v_z = H_1_z + A_1_z + H_2_z
        v_y_pr = v_y * cos(theta) + v_z * sin(theta)
        v_z_pr = -v_y * sin(theta) + v_z * cos(theta)
    elif var in list(pl.frange(x_2 + x_a / 2, x_3, step)):
        m = (H_2_y * var - q * var**2 / 2)
        v_y = q * var - H_1_y - H_2_y
        v_z = 0
        v_y_pr = v_y * cos(theta) + v_z * sin(theta)
        v_z_pr = -v_y * sin(theta) + v_z * cos(theta)
    else:
        m = (H_3_y * var - q * var**2 / 2)
        v_y = q * var - H_1_y - H_2_y - H_3_y
        v_z = 0
        v_y_pr = v_y * cos(theta) + v_z * sin(theta)
        v_z_pr = -v_y * sin(theta) + v_z * cos(theta)

    return m, v_y, v_z, v_y_pr, v_z_pr
    def calc_scales(self, raw_img):
        '''
        Calculates the different scales of a frame for the CNN to find faces in.
        '''

        raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
        min_scale = min(
            np.floor(np.log2(np.max(self.clusters_w[self.normal_idx] /
                                    raw_w))),
            np.floor(np.log2(np.max(self.clusters_h[self.normal_idx] /
                                    raw_h))))
        max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
        scales_down = pl.frange(min_scale, 0, 1.)
        scales_up = pl.frange(0.5, max_scale, 0.5)
        scales_pow = np.hstack((scales_down, scales_up))
        scales = np.power(2.0, scales_pow)
        return scales
Example #34
def process_svr(df):

    bestsc = -100
    bestpara = 1
    for c in pl.frange(0.5, 1.5, 0.1):
        clf = svm.SVR(C=c)
        scores = cross_validation.cross_val_score(clf,df[predictors],df[target1].values.ravel(),cv = 5)
        score = np.mean(scores)
        if (bestsc < score):
            bestsc = score
            bestpara = c
    return bestpara
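
# A minimal usage sketch, assuming a pandas DataFrame df and the module-level
# `predictors` and `target1` column lists that process_svr reads (all
# hypothetical here): pick the best C by 5-fold CV, then refit on the full frame.
best_C = process_svr(df)
model = svm.SVR(C=best_C).fit(df[predictors], df[target1].values.ravel())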
Example #35
def process_ridge(df):

    bestpara = 0
    bestsc = -1000
    for alp in pl.frange(0.5, 1.5, 0.1):
        clf = Ridge(alpha=alp)
        scores = cross_validation.cross_val_score(clf,df[predictors],df[target1].values.ravel(),cv = 5)
        score = np.mean(scores)
        if (bestsc < score):
            bestsc = score
            bestpara = alp
            
    return bestpara
Example #36
def plot_theory():
    '''Produce a plot showing the forcing, analytic velocity solution and
    analytic pressure solution'''
    from pylab import \
    plot,figure,quiver,frange,subplot,xticks,yticks,axis,xlabel,ylabel, \
    subplots_adjust,array


    figure()

    y=frange(0.0,1,0.05)

    psol=pressure_solution(forcing)

    usol=solution(forcing)

    v=0*y

    x=0*y

    us=array([float(usol(pos)) for pos in zip(x,y)])

    ps=array([float(psol(pos)) for pos in zip(x,y)])

    uf=array([forcing(pos) for pos in zip(x,y)])[:,0]

    subplots_adjust(wspace=0.25)
    subplot(1,3,1)

    quiver(x[1:-1],y[1:-1],uf[1:-1],v[1:-1], scale=1)
    plot(uf,y)
    xticks([0,0.5,1],map(str,[0,0.5,1]))
    yticks([ 0 ,  0.2,  0.4,  0.6,  0.8,  1 ],map(str,[ 0 ,  0.2,  0.4,  0.6,  0.8,  1 ]))
    ylabel("y")
    xlabel("u source")

    subplot(1,3,2)
    plot(us,y)
    quiver(x[1:-1],y[1:-1],us[1:-1],v[1:-1], scale=.03)
    xticks([0,0.01,0.02,0.03],map(str,[0,0.01,0.02,0.03]))
    yticks([])
    xlabel("u solution")

    subplot(1,3,3)
    plot(ps,y)
    xticks([-0.02,-0.01,0],map(str,[-0.02,-0.01,0]))
    yticks([])
    xlabel("p solution")
    

    return uf,us,ps
def create_ringmap(one2onepar,ringmap):
    run_dir = config['defaultsave.directory']
    ringmap= os.path.join(run_dir,os.path.basename(ringmap)) 
    
    fid = open(ringmap,'w')    
    det = np.genfromtxt(one2onepar,
                                    names="l2, 2theta, phi, pwid, phigh",
                                    skip_header=1,
                                    dtype =(float, float, float, float, float))
                                    
    ttheta=np.array(det['2theta'])
    group=0
    numspec_tot=0

    dtheta=0.63
    for angle in py.frange(2.83,136,dtheta):
        myindex=(ttheta>(angle-dtheta/2))*(ttheta<(angle+dtheta/2))
        spectra=np.asarray(np.where(myindex))
        spectra=spectra+1
        numspec=np.shape(spectra)[1]
        if np.shape(spectra)[1]>0:
            group=group+1
    
    fid.write('{0:4.0f}\n'.format(group))
    group=0
    for angle in py.frange(2.83,136,dtheta):
        myindex=(ttheta>(angle-dtheta/2))*(ttheta<(angle+dtheta/2))
        spectra=np.asarray(np.where(myindex))
        spectra=spectra+1
        numspec=np.shape(spectra)[1]
        if np.shape(spectra)[1]>0:
            group=group+1
        fid.write('{0:4.0f}\n'.format(group))
        fid.write('{0:5.0f}\n'.format(np.shape(spectra)[1]))
        for i in range(numspec):
            fid.write('{0:6.0f}\n'.format(spectra[0][i]))
    
    fid.close()
Example #38
def a2(db_name):
    max_tp = 0
    max_tn = 0
    alpha = []
    for alpha1 in range(2, 20, 1):
        for alpha2 in pylab.frange(0.1, 0.5, 0.1):
            for alpha3 in range(1000, 30000, 1000):
                percent = a1(alpha1, alpha2, alpha3, db_name)
                if percent[0][0] > max_tp and percent[1][1] > max_tn:
                    max_tp = percent[0][0]
                    max_tn = percent[1][1]
                    alpha = [alpha1, alpha2, alpha3]
    t_matrix.append([max_tp, max_tn, db_name[1:]])
    print(alpha, db_name, max_tp, max_tn)
def simulateBungee(mass, simulationTime, deltaT, surfaceArea,
                   unstretchedBungeeLength):
    # F_weight = mass * g
    # F_friction = -0.65 * surfaceArea * v * abs(v)
    # use 0.2m^2 for surface area
    # acceleration = F_total / mass
    # F_total = F_weight + F_friction
    # F_Spring = -k * d (k = 21.7)(d = displacement)

    F_weight = mass * 9.81
    k_cons = 21.7

    deltaVel = 0  # initial velocity should be 0?
    deltaD = 0  # change in distance
    #accel = 9.81  # m/s^2, not constant anymore
    elapsedTime = []
    length = []
    velocity = []
    acceleration = []

    for t in pl.frange(0, simulationTime, deltaT):
        #print t
        # t can be considered elapsedTimet

        # Store time step values
        elapsedTime.append(t)

        # Calculate distance and update value
        deltaD += deltaVel * deltaT
        length.append(deltaD)

        # Friction and acceleration calculations
        F_friction = -0.65 * surfaceArea * deltaVel * abs(deltaVel)
        # Hooke's Law
        F_spring = -k_cons * (deltaD - unstretchedBungeeLength)
        F_total = F_weight + F_friction + F_spring
        accel = F_total / mass

        # Calculate Delta Velocity and update value
        deltaVel += accel * deltaT
        velocity.append(deltaVel)

        acceleration.append(accel)

    #print elapsedTime
    #print length
    #print velocity
    #print acceleration

    return elapsedTime, length, velocity, acceleration
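
# A minimal usage sketch (hypothetical parameters): an 80 kg jumper on a 20 m
# unstretched cord with 0.2 m^2 frontal area, simulated for 60 s at 0.01 s
# steps; the returned traces can be plotted to see the damped oscillation.
elapsedTime, length, velocity, acceleration = simulateBungee(
    80.0, 60.0, 0.01, 0.2, 20.0)
print(max(length), min(velocity), max(velocity))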
def getRays(point, jacobi_xyz, n_rays,
            wedge_angle):  #Returns list of rays with origins and directions
    rays = []
    for angle in pylab.frange(-wedge_angle, wedge_angle + 0.001,
                              2.0 * wedge_angle / n_rays):
        T_wedge = numpy.eye(4)
        T_wedge[0:3, 0:3] = get_data.rotation_matrix_from_rpy(0, angle, 0)
        ray_normal = transformNormal(T_wedge, jacobi_xyz)[0:3]
        ray = numpy.array([
            point[0], point[1], point[2], ray_normal[0], ray_normal[1],
            ray_normal[2]
        ])
        rays.append(ray)
    return rays
def inf_rashba_cond():
	"""
	Compute and plot conductance of infinite Rashba Plane.
	"""
	xs = []
	ys = []
	for energy in pl.frange(-4.5,4.5,0.1):
		xs.append(energy)
		alpha = integrate.quad(inf_rashba_integrand,0, 2*np.pi, args=(energy,),epsabs=1e-1, epsrel=1e-1, limit=50)[0]
		ys.append(alpha)
	fig = plt.figure()
	ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
	ax.plot(xs, ys, 'bo', xs, ys, 'g')
	fig.savefig('rashba_alpha_vs_energy_fixed_SOC_0.01.png')
	plt.show()
def infinite_rashba():
	"""
	Find Gilbert Damping in infinite Rashba plane through k-integration.
	"""
	xs = []
	ys = []
	for energy in pl.frange(-4,4,0.1):
		xs.append(energy)	
		alpha = integrate.quad(inf_rashba_integrand, 0, 2*np.pi, args=(energy,))[0]  # quad returns (value, abserr); keep the value
		ys.append(alpha)
	fig = plt.figure()
	ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
	ax.plot(xs, ys, 'b')
	fig.savefig('rashba_alpha_vs_energy.png')
	plt.close(fig)
Example #44
def b():
    nodos = pl.frange(0, 50, 0.1)
    clique = [x**5 for x in nodos]
    a = [random.normalvariate(x**2, 0.5) for x in nodos]
    b = [random.normalvariate(x**2.5, 0.5) for x in nodos]

    plt.clf()
    df = pd.DataFrame({
        'Algoritmo A': a[0:50],
        'Algoritmo B': b[0:50],
        'Clique máxima': clique[0:50]
    })
    df.plot(x='Clique máxima')

    plt.show()
def distribution(data, dx):
    data_min=min(data)
    data_max=max(data)
    x=[]
    y=[]

    for interval in frange(data_min+dx/2, data_max, dx):
        x.append(interval)
        y.append(0)

    for i in data:
        index=int((i-data_min)/dx)
        y[index]+=1

    return (x, y)
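
# A minimal usage sketch (hypothetical samples): bin six values with a bin
# width of 1.0; x holds the bin centers and y the per-bin counts.
x, y = distribution([0.2, 0.4, 1.1, 1.9, 2.5, 2.6], 1.0)
print(list(zip(x, y)))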
def plot_potential_energy(containers, dt, num_total_frames):
    init_container = containers[0]
    pl.clf()
    # Plot PE --------------------------
    times = pl.frange(dt, num_total_frames*dt, dt)  # skip zeroth time because we have no value for it
    pes = [c.potential_energy for c in containers[1:]]  # skip first container because it has no PE
    plotted_pes = np.array(pes[:len(times)])
    plotted_pes /= init_container.num_particles  # PE per particle
    pl.plot(times, plotted_pes, 'o-', color='black', markersize=1, linewidth=0.1)
    pl.ylabel('Potential energy per particle')
    pl.xlabel('Time')
    pl.title('PE/particle for {} frames'.format(num_total_frames))
    pl.savefig('{}/plots/svg/pe {}.svg'.format(working_directory, info_for_naming))
    pl.savefig('{}/plots/pe {}.png'.format(working_directory, info_for_naming))
    pl.show()
Example #47
def mk_grid(center: Point, R, r=20):
    step = r / 2
    # the number of kilometers in one radian
    # kms_per_radian = 6371.0088
    # radian_per_km = 0.00015696101377226163
    deg_per_km = 0.0089932036372453797

    R_rad = deg_per_km * R
    r_rad = deg_per_km * r
    step_rad = deg_per_km * step
    lat_range = [
        i for i in pl.frange(center.latitude - R_rad + r_rad, center.latitude +
                             R_rad - r_rad, step_rad)
    ]
    lng_range = [
        i for i in pl.frange(center.longitude -
                             (deg_per_km * R), center.longitude +
                             (deg_per_km * R), step_rad)
    ]
    grid = []
    for x in lat_range:
        for y in lng_range:
            grid.append(Point(x, y))
    return grid
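
# A minimal usage sketch (hypothetical coordinates), assuming Point is a
# geopy-style point exposing .latitude and .longitude: cover a 100 km radius
# around a center with 20 km cells that overlap by half a cell.
grid = mk_grid(Point(52.52, 13.40), R=100, r=20)
print(len(grid))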
Example #48
def ColorSpectrum(num_colors):
    """wrapper for the hue2clr module

    USAGE: ColorSpectrum(num_colors)
    OUTPUT: array of triplets defining equally 'spaced' colors on perimeter of color circle

    """
    if (num_colors == 1):
        hue = n.array([0])
    else:
        hue = p.frange(0, 0.667, npts=num_colors)
        # print 'num_colors=', num_colors, 'hue=', hue
    spect = hue2clr(hue)

    return spect
def create_multiscales_training_dataset(data_path, output_path):

    # check that the data path is either Train or Test inside INRIAPerson
    inria_person_path, subfolder_name = os.path.split(data_path)
    assert os.path.isdir(data_path)
    assert os.path.basename(inria_person_path) == "INRIAPerson"
    assert subfolder_name in ["Train", "Test"]

    annotations = list(annotations_to_filenames_and_boxes(data_path))

    # define the scales we care about
    # this is the scales range used for INRIA
    min_scale, max_scale = 0.6094, 8.6
    delta_octave = 1.0
    #delta_octave = 0.5

    model_width, model_height = 64, 128
    # how many pixels around the model box to define a test image ?
    cropping_border = 20

    if True:
        # just for testing
        cropping_border = 20
        #min_scale, max_scale = 4, 4
        min_scale, max_scale = 1, 1

    min_octave = int(round(math.log(min_scale)/math.log(2)))
    max_octave = int(round(math.log(max_scale)/math.log(2)))
    #max_octave=-1
    octaves = list(pylab.frange(min_octave, max_octave, delta_octave))
    # for debugging, it is easier to look at big pictures first
    octaves.reverse()

    if os.path.exists(output_path):
        raise RuntimeError(output_path + " should not exist")
    else:
        os.mkdir(output_path)
        print("Created folder", output_path)

    print("Will create data for octaves", octaves)

    for octave in octaves:
        create_positives_and_negatives(data_path, output_path, annotations,
                                       model_width, model_height,
                                       cropping_border, octave)
    # end of "for each octave"

    return
def simulateFallFriction(mass, simulationTime, deltaT, surfaceArea):
    # F_weight = mass * g
    # F_friction = -0.65 * surfaceArea * v * abs(v)
    # use 0.2m^2 for surface area
    # acceleration = F_total / mass
    # F_total = F_weight + F_friction


    F_weight = mass * 9.81


    deltaVel = 0  # initial velocity should be 0?
    deltaD = 0  # change in distance
    #accel = 9.81  # m/s^2, not constant anymore
    elapsedTime = []
    length = []
    velocity = []
    acceleration = []

    for t in pl.frange(0, simulationTime, deltaT):
        #print t
        # t can be considered elapsedTimet

        # Store time step values
        elapsedTime.append(t)

        # Calculate distance and update value
        deltaD += deltaVel * deltaT
        length.append(deltaD)

        # Friction and acceleration calculations
        F_friction = -0.65 * surfaceArea * deltaVel * abs(deltaVel)
        F_total = F_weight + F_friction
        accel = F_total / mass

        # Calculate Delta Velocity and update value
        deltaVel += accel * deltaT
        velocity.append(deltaVel)

        acceleration.append(accel)

    #print elapsedTime
    #print length
    #print velocity
    #print acceleration

    return elapsedTime, length, velocity, acceleration
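
# A minimal usage sketch (hypothetical parameters): with quadratic drag the
# velocity should level off near the terminal value sqrt(m*g / (0.65*A)),
# about 77.7 m/s for an 80 kg body with 0.2 m^2 area.
elapsedTime, length, velocity, acceleration = simulateFallFriction(
    80.0, 30.0, 0.01, 0.2)
print(velocity[-1])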
Example #52
def create_histogram(a, trim_p, bin_size, p_title, p_ylabel, p_xlabel, 
                     file_prefix, dec_prec=0, trim_type="both"):
    """Interacts with pylab to draw and save histogram plot.
    
    Arguments:
    a -- array_like list of values to plot
    trim_p -- Percentile (range 0 to 1) of values to trim (float)
    bin_size -- Size of histogram's value bins (float)
    p_title -- Title of plot
    p_ylabel -- ylabel of plot
    p_xlabel -- xlabel of plot
    file_prefix -- Filename prefix
    dec_prec -- (Optional) Decimal precision of bins. Defaults to 0. (int)
    trim_type -- (Optional) Controls the tail of distribution that percentile 
                 trim_p is applied. Values include "both", "left", and "right".
                 Defaults to "both".
    """
    a.sort()
    sample_size = len(a)
    print("a length pre-trim_p", len(a))
    if trim_type == "left" or trim_type == "right":
        a = stats.trim1(a, trim_p, trim_type)
    else:
        a = stats.trimboth(a, trim_p)
    print("a length post-trim_p", len(a))
    bin_min = math.floor(min(a)) # TODO Round down to dec_prec instead
    bin_max = round(max(a), dec_prec)
    print("bin size=" + str(bin_size) + 
          ", bin min=" + str(bin_min) + 
          ", bin max=" + str(bin_max))
    # Create histogram of values
    n, bins, patches = pylab.hist(a, 
                                  bins=pylab.frange(bin_min, 
                                                    bin_max, 
                                                    bin_size), 
                                  normed=False,  # pre-2.1 matplotlib API; newer versions use density=
                                  histtype="stepfilled")
    pylab.setp(patches, "facecolor", "g", "alpha", 0.75)
    pylab.title(p_title)
    pylab.xlabel(p_xlabel)
    pylab.ylabel(p_ylabel)
    
    if trim_p > 0:
        pylab.savefig(file_prefix + "_trimmed.png")
    else:
        pylab.savefig(file_prefix + ".png")
    pylab.show()
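
A hedged usage sketch for create_histogram, with a synthetic sample (numpy is only used to generate data; note that pylab.frange and the normed= argument require an older matplotlib):

import numpy as np

# 1000 roughly normal values; trim the extreme 2% from both tails,
# bin in 0.5-wide steps, and save the figure as my_hist_trimmed.png
values = list(np.random.normal(loc=10.0, scale=2.0, size=1000))
create_histogram(values, trim_p=0.02, bin_size=0.5,
                 p_title="Synthetic sample", p_ylabel="Count",
                 p_xlabel="Value", file_prefix="my_hist", dec_prec=1)
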
def create_multiscales_training_dataset(data_path, output_path, target_category):


    # check that data path is either Train or Test inside INRIAPerson
    inria_person_path, subfolder_name = os.path.split(data_path)
    assert os.path.isdir(data_path)
    #assert os.path.basename(inria_person_path) == "INRIAPerson"
    #assert subfolder_name in ["Train", "Test"]

    # Done: Adapt this to GTSD
    # target_category = 'Mandatory'
    annotations = annotations_to_filenames_and_boxes(data_path, target_category)

    # define the scales we care about
    min_scale, max_scale = 0.33, 2.7  # scale range used for GTSDB
    delta_octave = 1

    model_width, model_height = 56, 56
    cropping_border = 10  # how many pixels around the model box to define a test image?

    if False:
        # just for testing
        cropping_border = 20
        min_scale, max_scale = 4, 4

    min_octave = int(round(math.log(min_scale)/math.log(2)))
    max_octave = int(round(math.log(max_scale)/math.log(2)))
    octaves = list(pylab.frange(min_octave, max_octave, delta_octave))
    octaves.reverse() # for debugging, it is easier to look at big pictures first

    if os.path.exists(output_path):
        raise RuntimeError(output_path + " should not exist")
    else:
        os.mkdir(output_path)
        print("Created folder", output_path)

    print("Will create data for octaves", octaves)

    for octave in octaves:
        create_positives_and_negatives(data_path, output_path, annotations,
                                       model_width, model_height, cropping_border,
                                       octave)
    # end of "for each octave"

    return
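
For the GTSDB range above, round(log2(0.33)) = -2 and round(log2(2.7)) = 1, so this variant produces octaves [-2, -1, 0, 1] before the reversal.
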
def create_multiscales_training_dataset(data_path, output_path, category):


    # check that data path is KITTI's training path
    assert os.path.split(data_path)[-1] == "training"

    annotations = list(annotations_to_filenames_and_boxes(data_path, category))

    # define the scales we care about
    min_scale, max_scale = 1, 1 # this is the scales range used for KITTI
    delta_octave = 1.0
    #delta_octave = 0.5

    # TODO: different model size for categories
    model_width, model_height = 104, 58

    cropping_border = 10  # how many pixels around the model box to define a test image?

    if False:
        # just for testing
        cropping_border = 20
        min_scale, max_scale = 4, 4

    min_octave = int(round(math.log(min_scale)/math.log(2)))
    max_octave = int(round(math.log(max_scale)/math.log(2)))
    octaves = list(pylab.frange(min_octave, max_octave, delta_octave))
    octaves.reverse() # for debugging, it is easier to look at big pictures first

    if os.path.exists(output_path):
        raise RuntimeError(output_path + " should not exist")
    else:
        os.mkdir(output_path)
        print("Created folder", output_path)

    print("Will create data for octaves", octaves)

    for octave in octaves:
        create_positives_and_negatives(data_path, output_path, annotations,
                                       model_width, model_height, cropping_border,
                                       octave)
    # end of "for each octave"

    return
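
With min_scale = max_scale = 1, round(log2(1)) = 0 on both ends, so the KITTI variant collapses to the single octave [0] and crops at one scale only.
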
    def run(self, integration_fn, dt, t_end=1, predictor_corrector_used=False):
        # Let us call the state of the system 'state'
        states = [np.array([self.initial_position, self.initial_velocity])]

        elapsed_time = 0
        while elapsed_time < t_end:
            if predictor_corrector_used:
                if len(states) == 1:
                    prev_state = None
                else:
                    prev_state = states[-2]
                next_state = integration_fn(elapsed_time, prev_state, states[-1], self.step, dt)
            else:
                next_state = integration_fn(elapsed_time, states[-1], self.step, dt)
            states.append(next_state)
            elapsed_time += dt

        # What we really want downstream is all the positions,
        # so turn the list of states into an array:
        states = np.array(states)

        # Now array slices get all positions and velocities:
        self.positions = states[:,0]
        self.velocities = states[:,1]
        self.times = pl.frange(0, elapsed_time, dt)

        # Accelerations are awkward to recover here: some methods (e.g. RK)
        # evaluate the step function several times per time value, so there are
        # more unique acceleration time points than entries in `times` (many of
        # them roughly halfway between grid points). Matching a float-valued
        # time point to each acceleration is not worth the complexity, so
        # accelerations are left out for now.

        # store vars in the object for easier inspection
        self.elapsed_time = elapsed_time
        self.states = states
        return self  # allow for chaining (or being chained from ctor)
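
The run method above only fixes the integrator's call signature; the scheme itself is pluggable. A minimal explicit-Euler sketch that fits the non-predictor-corrector branch, assuming self.step(t, state) returns the state derivative (its actual signature is not shown in this snippet):

import numpy as np

def euler_step(t, state, step, dt):
    # state' = step(t, state); advance one explicit-Euler step
    return state + dt * np.asarray(step(t, state))

# usage sketch: system.run(euler_step, dt=0.01, t_end=1)
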
Example #56
def ColorSpectrum(num_colors, return_as_tuples=False):
    """wrapper for the hue2clr module
    
    USAGE: ColorSpectrum(num_colors)
    OUTPUT: array of triplets defining equally 'spaced' colors on perimeter of color circle
    
    """
    if num_colors == 1:
        hue = n.array([0])
    else:
        hue = p.frange(0, 0.667, npts=num_colors)
    spect = hue2clr(hue)

    if return_as_tuples:
        rgb_tuple_list = []
        for rgb_set in spect:
            rgb_tuple_list.append((rgb_set[0], rgb_set[1], rgb_set[2]))
        return rgb_tuple_list
    else:
        return spect
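
hue2clr is not shown in this snippet; a hedged stand-in that produces the same kind of equally spaced hues (0 to 0.667, i.e. red around to blue) using the standard library's colorsys:

import colorsys
import numpy as np

def color_spectrum(num_colors, return_as_tuples=False):
    # hues equally spaced from red (0.0) to blue (~0.667), full saturation/value
    if num_colors == 1:
        hues = [0.0]
    else:
        hues = [0.667 * i / (num_colors - 1) for i in range(num_colors)]
    rgb = [colorsys.hsv_to_rgb(h, 1.0, 1.0) for h in hues]
    return rgb if return_as_tuples else np.array(rgb)
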
Example #57
    def update(self, history, currenttime, timestep, stepmax, params, learning_rule="STDP", neurons=None, reward=None):
        if learning_rule == "STDP":
            for t in pl.frange(timestep, stepmax, timestep):
                if t < currenttime:
                    self.weights[
                        np.array(history[currenttime], int).reshape((-1, 1)), np.array(history[currenttime - t], int)
                    ] += params[0] * np.exp(-params[1] * t)  # decay with the pre/post spike-time difference t

        # STDP with a global reinforcement signal, a la Florian 2005
        # GLOBAL REINFORCEMENT CODE UNDER DEVELOPMENT
        if learning_rule == "STDP_GLOBAL_REINFORCEMENT":
            beta, gamma, tau, tau_sigma = params
            neurons.sort()
            # loop through all neurons
            eta = np.zeros(np.shape(self.weights))
            z = np.zeros(np.shape(self.weights))
            sigma = []
            for num_n, n in enumerate(neurons):
                ss = (n.dt / tau_sigma) * np.exp(beta * (n.Vm - n.Vth))
                sigma.append(ss if ss < 1 else 0.99)
                if n.spike:
                    for num_m, m in enumerate(neurons):
                        for k in range(int(currenttime / n.dt)):
                            eta[num_n, num_m] += beta * np.exp(-((k - 1) * n.dt / tau))
                            self.weights[num_n, num_m] += gamma * reward * z[num_n, num_m]
                            self.weights[num_m, num_n] -= gamma * reward * z[num_m, num_n]
                            z[num_n, num_m] = beta * z[num_n, num_m] + eta[num_n, num_m]
                else:
                    for num_m, m in enumerate(neurons):
                        for k in range(int(currenttime / n.dt)):
                            eta[num_n, num_m] -= (beta * sigma[num_n] / (1 - sigma[num_n])) * np.exp(
                                -((k - 1) * n.dt / tau)
                            )
                            self.weights[num_n, num_m] += gamma * reward * z[num_n, num_m]
                            self.weights[num_m, num_n] -= gamma * reward * z[num_m, num_n]
                            z[num_n, num_m] = beta * z[num_n, num_m] + eta[num_n, num_m]
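
For reference, the plain-STDP branch above implements the pairwise rule delta_w = A * exp(-lam * delta_t) for a presynaptic spike preceding a postsynaptic one by delta_t (A and lam correspond to params[0] and params[1]). A minimal vectorized sketch of that kernel; the spike-time inputs are hypothetical:

import numpy as np

def stdp_potentiation(pre_spike_times, post_spike_time, A, lam):
    # delta_w = A * exp(-lam * delta_t), delta_t = post - pre (causal pairs only)
    dt = post_spike_time - np.asarray(pre_spike_times, dtype=float)
    dw = A * np.exp(-lam * dt)
    dw[dt < 0] = 0.0  # ignore acausal pairs (pre after post)
    return dw

# e.g. stdp_potentiation([1.0, 5.0, 9.0], post_spike_time=10.0, A=0.01, lam=0.1)
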
    def run(self, integration_fn, dt, t_end=1, predictor_corrector_used=False):
        # Let us call the state of the system 'state'
        states = [np.array(self.initial_state)]

        elapsed_time = 0
        while elapsed_time < t_end:
            next_state = integration_fn(elapsed_time, states[-1], self.step, dt)
            states.append(next_state)
            elapsed_time += dt

        # What we really want downstream is all the positions,
        # so turn the list of states into an array:
        states = np.array(states)

        self.times = pl.frange(0, elapsed_time, dt)

        # store vars in the object for easier inspection
        self.elapsed_time = elapsed_time
        self.states = states
        return self  # allow for chaining (or being chained from ctor)
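
The same run signature also accepts higher-order one-step schemes. A hedged midpoint (RK2) sketch, under the same assumption that self.step(t, state) returns the state derivative:

import numpy as np

def midpoint_step(t, state, step, dt):
    # evaluate the derivative at the interval midpoint (second-order accurate)
    k1 = np.asarray(step(t, state))
    k2 = np.asarray(step(t + dt / 2.0, state + (dt / 2.0) * k1))
    return state + dt * k2

# usage sketch: system.run(midpoint_step, dt=0.01, t_end=1)
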
Example #59
def plot_patches(src_num, info):
	# Define constants #
	deg2rad   = np.pi/180.
	# Define the width of area #
	beam   = 14.4            # beam = 14.4'
	dbeam  = beam/60./2.     # half-beam in degrees
	offset = dbeam           # degrees

	# HI continuum map and resolution #
	cont  = hp.read_map(os.getenv("HOME")+'/hdata/hi/lambda_chipass_healpix_r10.fits', field = 0, h=False)
	nside = hp.get_nside(cont)
	res   = hp.nside2resol(nside, arcmin=False)
	dd    = res/deg2rad/5.0

	# OK - Go #
	tb    = {}
	for i in range(0, src_num):
		if i in (2, 3, 6, 11):
			continue
		## Find the values of Continuum temperature #
		tb[i] = []
		
		## Longitude and latitude ##
		l     = info['l'][i]
		b     = info['b'][i]

		# if(i != 14): continue

		# Plot cartview a/o mollview #
		ll = l
		if (l>180):
			ll = ll-360.

		proj = hp.cartview(cont, title=info['src'][i]+'('+str(info['l'][i])+','+str(info['b'][i])+')', coord='G', unit='',
				norm=None, xsize=1920, lonra=[ll-0.5,ll+0.5], latra=[b-0.5,b+0.5],
				return_projected_map=True)

		print(proj)
		print(proj.shape)

		# Cal. #
		theta = (90.0-b)*deg2rad
		phi   = l*deg2rad
		pix   = hp.ang2pix(nside, theta, phi, nest=False)

		if cont[pix] > -1.0e30:  # some pixels are undefined
			tb[i].append(cont[pix])

		for x in pl.frange(l-offset, l+offset, dd):
			for y in pl.frange(b-offset, b+offset, dd):
				if ((x-l)**2 + (y-b)**2) <= offset**2:
					theta = (90.0 - y)*deg2rad
					phi   = x*deg2rad
					pix   = hp.ang2pix(nside, theta, phi, nest=False)
					# hp.projtext(x, y, '.'+str(cont[pix]), lonlat=True, coord='G')
					hp.projtext(x, y, '.', lonlat=True, coord='G')

		plt.show()
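
The double frange loop above rasterizes a small disc of grid points around each source; healpy can also select the disc's pixels directly. A hedged alternative sketch using hp.query_disc (cont, nside, and the offset radius are the same quantities as above):

import numpy as np
import healpy as hp

def mean_continuum_in_disc(cont, nside, l_deg, b_deg, radius_deg):
    # all pixels within radius_deg of (l, b) in Galactic coordinates
    vec = hp.ang2vec(np.radians(90.0 - b_deg), np.radians(l_deg))
    pix = hp.query_disc(nside, vec, np.radians(radius_deg))
    vals = cont[pix]
    vals = vals[vals > -1.0e30]  # drop undefined pixels, as in the loop above
    return vals.mean() if len(vals) else np.nan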