Exemplo n.º 1
0
def crea_escenarios(lista, rango, filtro):
    """Build scenario CSV files from the 'Segurosdb' dataset.

    Parameters
    ----------
    lista : sequence of column specs:
        lista[0] -> columns for the first group-by,
        lista[1] -> columns to drop from the raw data,
        lista[2] -> 1 to 3 value columns to aggregate,
        lista[3] -> columns for the second group-by and the sort.
    rango : range identifier forwarded to the transpose/rename helpers
        and embedded in the output file name.
    filtro : 'COD' value to keep; '' keeps every row.
    """
    print('-Crea escenarios-')

    df = read_csv('Segurosdb')
    df = df.drop(lista[1], axis=1)

    value_cols = lista[2]
    n_vars = len(value_cols)  # renamed: 'vars' shadowed the builtin

    if filtro != '':
        df = df.loc[df['COD'] == filtro]

    # Two-stage aggregation: sum the value columns per lista[0] group,
    # then re-aggregate per lista[3] and sort on it.
    set_group = df.groupby(lista[0])[value_cols].apply(lambda x: x.astype(int).sum())
    set_group = set_group.groupby(lista[3])[value_cols].apply(lambda y: y.astype(int).sum())
    set_group.sort_values(lista[3], ascending=[False, True], inplace=True)

    # One transpose/rename helper pair per number of value columns.
    if n_vars == 1:
        esc = transpone(set_group, rango, value_cols)
        df = renombracols(esc, rango)
        crea_csv(df, 'Esc' + filtro + str(value_cols[0]) + str(rango) + 'M')
    elif n_vars == 2:
        esc = transpone2(set_group, rango, value_cols[0], value_cols[1])
        df = renombracols2(esc, rango, value_cols[0], value_cols[1])
        crea_csv(df, 'Esc' + filtro + str(value_cols[0]) + str(value_cols[1])
                 + str(rango) + 'M')
    else:
        esc = transpone3(set_group, rango, value_cols[0], value_cols[1], value_cols[2])
        df = renombracols3(esc, rango, value_cols[0], value_cols[1], value_cols[2])
        crea_csv(df, 'Esc' + filtro + str(value_cols[0]) + str(value_cols[1])
                 + str(value_cols[2]) + str(rango) + 'M')
Exemplo n.º 2
0
def graficar_dc_sweep(spice_filename, input_file, output_filename1,
                      output_filename2, ganancia, minv, maxv):
    """Plot the DC sweep: measured, SPICE-simulated and theoretical Vin/Vout.

    Parameters
    ----------
    spice_filename : CSV with the SPICE sweep, under input/Ej1_SpiceDCSweep/.
    input_file : CSV with the measured sweep, under input/Ej1_DCSweep/.
    output_filename1 : name passed to draw_time for the time-domain plot.
    output_filename2 : name of the saved Vin/Vout figure,
        under output/dc_sweep/vinvout/.
    ganancia : gain forwarded to generar_teorico for the theoretical curve.
    minv, maxv : keep only samples whose "t" value lies strictly in (minv, maxv).
    """
    data_basic = read_csv("input/Ej1_DCSweep/" + input_file)
    data = {"t": [], "vin": [], "vout": []}

    # Keep only rows whose time stamp falls inside the requested window.
    # Iterate over data's own keys (not data_basic's): an extra column in
    # the input CSV would otherwise raise KeyError on data[j].
    for i in range(len(data_basic["t"])):
        if minv < data_basic["t"][i] < maxv:
            for key in data:
                data[key].append(data_basic[key][i])

    draw_time(data, output_filename1)

    data_vo = computar_funcion(data)
    spice_data = read_spice_vin_vout("input/Ej1_SpiceDCSweep/" +
                                     spice_filename)
    teorico_data = generar_teorico(ganancia)

    fig, ax1 = plt.subplots()

    # Measured (blue), simulated (green) and theoretical (magenta) curves.
    ax1.plot(data_vo["vin"], data_vo["vout"], 'blue', linewidth=3)
    ax1.plot(spice_data["vin"], spice_data["vout"], "green", linewidth=3)
    ax1.plot(teorico_data["vin"], teorico_data["vout"], "magenta", linewidth=1)

    plt.xlabel("Vin (V)")
    plt.ylabel("Vout (v)")

    blue_patch = mpatches.Patch(color='blue', label='Práctica')
    green_patch = mpatches.Patch(color='green', label='Simulación')
    magenta_patch = mpatches.Patch(color='magenta', label='Teoría')

    plt.legend(handles=[blue_patch, green_patch, magenta_patch])
    ax1.minorticks_on()
    ax1.grid(which='major', linestyle='-', linewidth=0.3, color='black')
    ax1.grid(which='minor', linestyle=':', linewidth=0.1, color='black')

    # Interactive cursor: show Vin/Vout at clicked points (mpldatacursor).
    datacursor(display='multiple',
               tolerance=10,
               formatter="Vin: {x:.1f} v \nVout:{y:.1f} v".format,
               draggable=True)

    plt.show()
    input("Press Enter ")

    fig.savefig("output/dc_sweep/vinvout/" + output_filename2)

    plt.cla()
    plt.close()
Exemplo n.º 3
0
def armar_grafico_muestras(dir, output_filename):
    """Plot the sampled input and output voltages over time and save the figure.

    Parameters
    ----------
    dir : CSV file name under input/muestras/ with "t", "vin" and "vout" columns.
    output_filename : name of the PNG written under output/muestras/.
    """
    fig, axes = plt.subplots()
    samples = read_csv("input/muestras/" + dir)

    # Input in red, output in blue, both against time.
    axes.plot(samples["t"], samples["vin"], color='red')
    axes.plot(samples["t"], samples["vout"], color='blue')

    plt.xlabel("Tiempo (s)")
    plt.ylabel("Tensión (v)")
    plt.legend(handles=[
        mpatches.Patch(color='red', label='In'),
        mpatches.Patch(color='blue', label='Out'),
    ])

    axes.minorticks_on()
    axes.grid(which='minor', linestyle=':', linewidth=0.1, color='black')
    axes.grid(which='major', linestyle='-', linewidth=0.3, color='black')

    fig.savefig("output/muestras/" + output_filename, dpi=300)
    plt.cla()
Exemplo n.º 4
0
        self.train_list = []
        self.test_list = []
        self.token_list = []
        self.filtered_list = []
        self.filtered_list_lowercase = []
        self.filtered_list_remove_stopwords = []
        self.filtered_list_remove_repeated_characters = []
        self.list_spell_checker = []
        self.filtered_list_stemmer = []
        self.filtered_list_lemma = []

        self.ext = []


# Instantiate the pipeline components.
classifier = text_classification()
read = read_csv()
clas = classification()
proc = pre_processing()

# Load the train and test corpora from disk.
classifier.train_list = read.read_csv("training-full-v13-bkp.csv")
classifier.test_list = read.read_csv("TrialData_SubtaskA_Test.csv", True)

# Split the train corpus into sentences (column 1) and labels (column 2).
train_corpus = [row[1] for row in classifier.train_list]
train_labels = [row[2] for row in classifier.train_list]

#Separated the sentences and labels from the test corpus.
Exemplo n.º 5
0
import pybullet as p
import pybullet_data
import time
import numpy as np
import pandas as pd
import read_csv

# Control values: torque cap and position-control mode for the joint motors.
maxForce = 100
mode = p.POSITION_CONTROL

# Read the PVT (presumably position/velocity/time) trajectory file.
# NOTE(review): the assignment below rebinds the name of the imported
# `read_csv` module to a ReadCsv instance, so the module is no longer
# reachable by that name afterwards.
file_name = './pvt_data/translation.csv'
read_csv = read_csv.ReadCsv(file_name)
pvt = read_csv()  # ReadCsv instances are callable; presumably returns the parsed data — confirm

# Set up the simulation environment.
physicsClient = p.connect(p.GUI)  #or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath())  #used by loadURDF
p.setGravity(0, 0, -10)  # gravity along -z
planeId = p.loadURDF("plane.urdf")
snakeStartPos = [0, 0, 0.5]
snakeStartOrientation = p.getQuaternionFromEuler([0, 0, 0])
snakeId = p.loadURDF("./urdf/trident_snake.urdf", snakeStartPos,
                     snakeStartOrientation)
# Set lateral friction on links 2, 5 and 7 (the links that contact the plane — verify against the URDF).
p.changeDynamics(bodyUniqueId=snakeId, linkIndex=2, lateralFriction=1)
p.changeDynamics(bodyUniqueId=snakeId, linkIndex=5, lateralFriction=1)
p.changeDynamics(bodyUniqueId=snakeId, linkIndex=7, lateralFriction=1)
timestep = 0  # simulation step counter

# enable joint F/T sensor
Exemplo n.º 6
0
    # Note that a '>' or '<' cannot be encoded with `urlencode`, only `>=` and `<=`.
    "time>": "2017-01-00T00:00:00Z",
    "station": '"urn:ioos:station:wmo:44011"',
    "parameter": '"Significant Wave Height"',
    "unit": '"m"',
}

url = encode_erddap(urlbase, fname, columns, params)

print(unquote(url))

Here is a convenient feature of ERDDAP's `tabledap`: its `csvp` data response can be read directly by Python's pandas `read_csv` function.

from pandas import read_csv

# The first column (time) becomes the index and is parsed as datetimes.
df = read_csv(url, index_col=0, parse_dates=True)

# Prevent :station: from turning into an emoji in the webpage.
df["station"] = df.station.str.split(":").str.join("_")

df.head()

With the `DataFrame` we can easily plot the data.

%matplotlib inline

ax = df["value"].plot(figsize=(11, 2.75), title=df["parameter"][0])

You may notice that slicing the time dimension on the server side is very fast compared with an OPeNDAP request: downloading the time dimension, slicing it, and then downloading the actual data are all much faster.

ERDDAP also allows for filtering of the variable's values. For example, let's get Wave Heights that are bigger than 6 meters starting from 2016.
Exemplo n.º 7
0
In [1]: from pandas import read_csv, DataFrame
  
In [2]: from pyspark import sql

In [3]: from pysparkling import H2OContext
  
In [4]: from h2o import import_file, H2OFrame 
  
In [5]: ss = sql.SparkSession.builder.getOrCreate()
  
In [6]: hc = H2OContext.getOrCreate(ss)

### Convert pandas DataFrame to H2OFrame and Spark DataFrame ###

In [7]: p_df = read_csv("Documents/credit_count.txt")
 
In [8]: type(p_df)
Out[8]: pandas.core.frame.DataFrame
 
In [9]: p2s_df = ss.createDataFrame(p_df)
 
In [10]: type(p2s_df)
Out[10]: pyspark.sql.dataframe.DataFrame
 
In [11]: p2h_df = H2OFrame(p_df)
 
In [12]: type(p2h_df)
Out[12]: h2o.frame.H2OFrame
  
### Convert Spark DataFrame to H2OFrame and pandas DataFrame ###