Example #1
    def POST(self):
        web.header('Content-Type', 'text/csv')
        web.header('Content-disposition', 'attachment; filename=trenchesCSV.csv')
        kml = web.input()
        kml = urllib.unquote(kml["jsondata"])
        x = json.loads(kml)
        csv = []
        csv.append("Trench, Trench Area, Trench Number, Trench ID, Year, Excavator,\n")






        for feature in x["features"]:
            row = []
            row.append(str(feature["properties"]["trench"].replace(",", "-")))
            row.append(str(feature["properties"]["trencharea"].replace(",", "-")))
            row.append(str(feature["properties"]["trenchnumb"].replace(",", "-")))
            row.append(str(feature["properties"]["trenchid"]))
            row.append(str(feature["properties"]["year"]))
            row.append(str(feature["properties"]["excavator"]) + "\n")

            csv.append(",".join(row))
        csv = "".join(csv)
        return csv
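
Note on Example 1: the ','.join only stays aligned because every value has its commas replaced with "-". A minimal sketch of the same export built on the standard csv module, which quotes embedded commas instead of mangling them (the function name and input shape are illustrative assumptions):

import csv
import io

def features_to_csv(features):
    buf = io.StringIO()
    writer = csv.writer(buf)  # the writer quotes fields, so no replace(",", "-") is needed
    writer.writerow(["Trench", "Trench Area", "Trench Number", "Trench ID", "Year", "Excavator"])
    for feature in features:
        props = feature["properties"]
        writer.writerow([props["trench"], props["trencharea"], props["trenchnumb"],
                         props["trenchid"], props["year"], props["excavator"]])
    return buf.getvalue()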
Example #2
import xlrd

def xls_reader(filename):
    workbook = xlrd.open_workbook(filename)
    worksheet = workbook.sheet_by_name('potongan')
    num_rows = worksheet.nrows - 1
    num_cells = worksheet.ncols - 1
    curr_row = -1
    csv = []
    while curr_row < num_rows:
        curr_row += 1
        row = worksheet.row(curr_row)
        curr_cell = -1
        txt = []
        while curr_cell < num_cells:
            curr_cell += 1
            # Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
            cell_type = worksheet.cell_type(curr_row, curr_cell)
            cell_value = worksheet.cell_value(curr_row, curr_cell)
            if cell_type==1 or cell_type==2:
                try:
                    cell_value = str(cell_value)
                except Exception:
                    cell_value = '0'
            else:
                cell_value = clean(cell_value)
                
            if curr_cell==0 and cell_value.strip()=="Tanggal":
                curr_cell=num_cells
            elif curr_cell==0 and cell_value.strip()=="":
                curr_cell = num_cells
                curr_row = num_rows
            else:
                txt.append(cell_value)
        if txt:
            csv.append(txt)
    return csv        
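
The manual row/cell counters above can be replaced with plain for loops. A sketch of the same traversal, assuming xlrd is available; it deliberately leaves out the "Tanggal" header skip and the blank-cell early exit:

import xlrd

def xls_rows(filename, sheet_name='potongan'):
    worksheet = xlrd.open_workbook(filename).sheet_by_name(sheet_name)
    # row_values returns one Python value per cell in the row
    return [worksheet.row_values(r) for r in range(worksheet.nrows)]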
Example #3
	def makeSheet( self ):
		csv = [ 'County,Type,All 2000,All 2008,All % Change,Dem 2000,Dem 2008,Dem % Change,GOP 2000,GOP 2008,GOP % Change,All 18-24,All 25-34,All 35-44,All 45-54,All 55-64,All 65-74,All 75+,Dem 18-24,Dem 25-34,Dem 35-44,Dem 45-54,Dem 55-64,Dem 65-74,Dem 75+,GOP 18-24,GOP 25-34,GOP 35-44,GOP 45-54,GOP 55-64,GOP 65-74,GOP 75+,White,Black,Asian,Other,Catholic,Evangelical,Mainline,Jewish,Other,None,Casey,Rendell' ]
		for place in self.places:
			pop = place['population'];  popAll = pop['all'];  popDem = pop['dem'];  popGop = pop['gop']
			ages = place['ages'];  ageAll = ages['all']['counts'];  ageDem = ages['dem']['counts']; ageGop = ages['gop']['counts']
			ethnic = place['ethnic']
			religion = place['religion']['percents']
			gub2002 = place['gub2002']
			csv.append(
				'%s,%s,%d,%d,%.2f%%,%d,%d,%.2f%%,%d,%d,%.2f%%,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%' %(
					place['name'], pop['type'],
					popAll['before'], popAll['after'], popAll['change'],
					popDem['before'], popDem['after'], popDem['change'],
					popGop['before'], popGop['after'], popGop['change'],
					ageAll[0], ageAll[1], ageAll[2], ageAll[3], ageAll[4], ageAll[5], ageAll[6],
					ageDem[0], ageDem[1], ageDem[2], ageDem[3], ageDem[4], ageDem[5], ageDem[6],
					ageGop[0], ageGop[1], ageGop[2], ageGop[3], ageGop[4], ageGop[5], ageGop[6],
					ethnic[0], ethnic[1], ethnic[2], ethnic[3],
					religion[0], religion[1], religion[2], religion[3], religion[4], religion[5],
					gub2002[0], gub2002[1]
				)
			)
		write(
			'%s/states/%s/spreadsheet.csv' %( datapath, self.state ),
			'\n'.join(csv)
		)
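
makeSheet drives a 44-placeholder format string, which is easy to get out of sync with the header row. A hedged alternative that assembles each row as a list in the same column order (make_row is a hypothetical helper; it assumes the same data shapes used above):

def make_row(place):
    pop = place['population']
    cells = [place['name'], pop['type']]
    for key in ('all', 'dem', 'gop'):
        grp = pop[key]
        cells += [grp['before'], grp['after'], '%.2f%%' % grp['change']]
    for key in ('all', 'dem', 'gop'):
        cells += place['ages'][key]['counts'][:7]  # seven age buckets per group
    cells += place['ethnic'][:4]
    cells += ['%.2f%%' % v for v in place['religion']['percents'][:6]]
    cells += ['%.2f%%' % v for v in place['gub2002'][:2]]
    return ','.join(str(c) for c in cells)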
Example #4
 def makeSheet(self):
     csv = [
         'County,Type,All 2000,All 2008,All % Change,Dem 2000,Dem 2008,Dem % Change,GOP 2000,GOP 2008,GOP % Change,All 18-24,All 25-34,All 35-44,All 45-54,All 55-64,All 65-74,All 75+,Dem 18-24,Dem 25-34,Dem 35-44,Dem 45-54,Dem 55-64,Dem 65-74,Dem 75+,GOP 18-24,GOP 25-34,GOP 35-44,GOP 45-54,GOP 55-64,GOP 65-74,GOP 75+,White,Black,Asian,Other,Catholic,Evangelical,Mainline,Jewish,Other,None,Casey,Rendell'
     ]
     for place in self.places:
         pop = place['population']
         popAll = pop['all']
         popDem = pop['dem']
         popGop = pop['gop']
         ages = place['ages']
         ageAll = ages['all']['counts']
         ageDem = ages['dem']['counts']
         ageGop = ages['gop']['counts']
         ethnic = place['ethnic']
         religion = place['religion']['percents']
         gub2002 = place['gub2002']
         csv.append(
             '%s,%s,%d,%d,%.2f%%,%d,%d,%.2f%%,%d,%d,%.2f%%,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%,%.2f%%'
             %
             (place['name'], pop['type'], popAll['before'], popAll['after'],
              popAll['change'], popDem['before'], popDem['after'],
              popDem['change'], popGop['before'], popGop['after'],
              popGop['change'], ageAll[0], ageAll[1], ageAll[2], ageAll[3],
              ageAll[4], ageAll[5], ageAll[6], ageDem[0], ageDem[1],
              ageDem[2], ageDem[3], ageDem[4], ageDem[5], ageDem[6],
              ageGop[0], ageGop[1], ageGop[2], ageGop[3], ageGop[4],
              ageGop[5], ageGop[6], ethnic[0], ethnic[1], ethnic[2],
              ethnic[3], religion[0], religion[1], religion[2], religion[3],
              religion[4], religion[5], gub2002[0], gub2002[1]))
     write('%s/states/%s/spreadsheet.csv' % (datapath, self.state),
           '\n'.join(csv))
Example #5
    def _csv_lines(self, cr, uid, travel_orders, context=None):
        csv = []
        self._set_decimal_point(self._decimal())
        for to in travel_orders:
            self._travel_order_name = to.name
            # hr.travel.order
            document_csv = self._document(cr, uid, to, context)
            csv.append(document_csv)
            # daily.allowance.lines
            if to.daily_allowance_ids:
                allowances_csv = self._allowances(cr, uid, to.daily_allowance_ids, to, context)
                csv.append(allowances_csv)
            # hr.expense.line
            if to.expense_line_ids:
                expenses_csv = self._expenses(cr, uid, to.expense_line_ids, to, context)
                csv.append(expenses_csv)
            # append itinerary lines to expenses (hr.travel.order.itinerary.lines)
            if to.itinerary_ids:
                itinerary_csv = self._itinerary_expenses(cr, uid, to.itinerary_ids, to, context)
                csv.append(itinerary_csv)
            # append daily allowances to expenses (daily.allowance.lines)
            if to.daily_allowance_ids:
                allowance_csv = self._daily_allowance_expenses(cr, uid, to.daily_allowance_ids, to, context)
                csv.append(allowance_csv)

        self._set_decimal_point()  # reset decimal point to default
        return csv
Example #6
def predict_on_images(model, images, config):
    tw = 224
    th = 224
    mean = config['mean']
    mean = np.array(mean) * 255
    classes = config['class_names']
    csv = []
    image = np.zeros((len(images), 3, th, tw), dtype=np.float32)
    for i, path in enumerate(images):
        img = Image.open(path)
        npimg = np.array(img).astype(np.float32)
        h = npimg.shape[0]
        w = npimg.shape[1]
        assert h >= th
        assert w >= tw
        assert npimg.shape[2] == 3
        dr = (h - th) // 2
        dc = (w - tw) // 2
        # RGB 2 BGR
        for k in range(3):
            image[i, 2 - k, :, :] = npimg[dr:dr + th, dc:dc + tw, k] - mean[k]
    res = model.eval(image)
    for i in range(len(images)):
        index = np.argmax(res[i]).item()
        csv.append([images[i], str(index), classes[index]] +
                   ['%8.6f' % v for v in res[i].tolist()])
    with open('report.csv', 'w') as f:
        for row in csv:
            line = ','.join(row) + '\n'
            f.write(line)
            sys.stdout.write(','.join(row[0:10] + ['...']) + '\n')
    return res
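
The inner loop of Example 6 does three jobs at once: center-crop, RGB-to-BGR swap, and mean subtraction. The same preprocessing can be vectorized with numpy slicing; a sketch, assuming mean is given in RGB order as in the config above:

import numpy as np

def center_crop_bgr(npimg, mean, th=224, tw=224):
    dr = (npimg.shape[0] - th) // 2
    dc = (npimg.shape[1] - tw) // 2
    crop = npimg[dr:dr + th, dc:dc + tw, :]
    # reverse the channel axis (RGB -> BGR) and subtract the matching means
    bgr = crop[:, :, ::-1] - np.asarray(mean, dtype=np.float32)[::-1]
    return bgr.transpose(2, 0, 1)  # HWC -> CHW, as the model expects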
Example #7
def predict_on_images(model,images,device,config):
    tw = 224
    th = 224
    mean = config['mean']
    std = config['std']
    classes = config['class_names']
    csv = []
    model.eval()
    image = torch.zeros((len(images),3,th,tw),dtype=torch.float32)
    for i,path in enumerate(images):
        img = PIL.Image.open(path)
        npimg = np.array(img).astype(np.float32) * (1.0 / 255)
        h = npimg.shape[0]
        w = npimg.shape[1]
        assert h>=th
        assert w>=tw
        assert npimg.shape[2] == 3
        fact = 1.0 / np.array(std)
        off  = -np.array(mean) * fact
        dr = (h - th) // 2
        dc = (w - tw) // 2
        for k in range(3):
            image[i,k,:,:] = torch.from_numpy(npimg[dr:dr+th,dc:dc+tw,k] * fact[k] + off[k])
    image = image.to(device)
    res = model(image)
    for i in range(len(images)):
        index = torch.argmax(res[i]).item()
        csv.append([images[i], str(index), classes[index]] + ['%8.6f' % v for v in res[i].tolist()])
    with open('report.csv','w') as f:
        for row in csv:
            line = ','.join(row) + '\n'
            f.write(line)
            sys.stdout.write(','.join(row[0:10] + ['...']) + '\n')
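
One caveat on the PyTorch variant: model.eval() changes layer behaviour (dropout, batch norm) but does not stop autograd from recording the forward pass. The usual inference guard, sketched with the model and image tensor from above:

import torch

with torch.no_grad():  # skip gradient bookkeeping during inference
    res = model(image)
probs = torch.softmax(res, dim=1)  # optional: turn logits into probabilities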
Example #8
import xlrd

def xls_reader(filename):
    workbook = xlrd.open_workbook(filename)
    worksheet = workbook.sheet_by_name('potongan')
    num_rows = worksheet.nrows - 1
    num_cells = worksheet.ncols - 1
    curr_row = -1
    csv = []
    while curr_row < num_rows:
        curr_row += 1
        row = worksheet.row(curr_row)
        curr_cell = -1
        txt = []
        while curr_cell < num_cells:
            curr_cell += 1
            # Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
            cell_type = worksheet.cell_type(curr_row, curr_cell)
            cell_value = worksheet.cell_value(curr_row, curr_cell)
            if cell_type == 1 or cell_type == 2:
                try:
                    cell_value = str(cell_value)
                except Exception:
                    cell_value = '0'
            else:
                cell_value = clean(cell_value)

            if curr_cell == 0 and cell_value.strip() == "Tanggal":
                curr_cell = num_cells
            elif curr_cell == 0 and cell_value.strip() == "":
                curr_cell = num_cells
                curr_row = num_rows
            else:
                txt.append(cell_value)
        if txt:
            csv.append(txt)
    return csv
Example #9
    def _csv_lines(self, cr, uid, travel_orders, context=None):
        csv = []
        self._set_decimal_point(self._decimal())
        for to in travel_orders:
            self._travel_order_name = to.name
            # hr.travel.order
            document_csv = self._document(cr, uid, to, context)
            csv.append(document_csv)
            # daily.allowance.lines
            if to.daily_allowance_ids:
                allowances_csv = self._allowances(cr, uid,
                                                  to.daily_allowance_ids, to,
                                                  context)
                csv.append(allowances_csv)
            # hr.expense.line
            if to.expense_line_ids:
                expenses_csv = self._expenses(cr, uid, to.expense_line_ids, to,
                                              context)
                csv.append(expenses_csv)
            # append itinerary lines to expenses (hr.travel.order.itinerary.lines)
            if to.itinerary_ids:
                itinerary_csv = self._itinerary_expenses(
                    cr, uid, to.itinerary_ids, to, context)
                csv.append(itinerary_csv)
            # append daily allowances to expenses daily.allowance.lines
            if to.daily_allowance_ids:
                allowance_csv = self._daily_allowance_expenses(
                    cr, uid, to.daily_allowance_ids, to, context)
                csv.append(allowance_csv)

        self._set_decimal_point()  # reset decimal point to default
        return csv
Example #10
def gen_swc_csv(root_dir=dir):  # note: relies on a module-level `dir` path defined elsewhere

    csv = []

    with open("transcriptions.txt", 'r') as f:
        lines = f.readlines()

    i = 0
    for line in lines:
        i += 1
        file_name = line.split(" ", 1)[0]
        file_text = line.split(" ", 1)[1]

        sentence = file_text.split(" ")
        if len(sentence) <= 2:
            continue

        trans = clean_sentence(file_text)
        file_path = os.path.join(root_dir, file_name + ".wav")
        csv.append((file_path, trans))
        print("File " + str(i) + " / " + str(len(lines)), end='\r')

    print()
    print("Writing CSV File:")
    df = pandas.DataFrame(data=csv)
    output_file = "/home/GPUAdmin1/asr/train_csvs/swc_train.csv"
    df.to_csv(output_file, header=False, index=False, sep=",")
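
The DataFrame above is written without column names. If the tuples really are (path, transcription), labelling them is a one-line change; the column names here are illustrative assumptions, reusing csv and output_file from Example 10:

import pandas

df = pandas.DataFrame(csv, columns=["wav_filename", "transcript"])
df.to_csv(output_file, header=True, index=False, sep=",")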
Example #11
def atividade2_process():
    url = 'http://www.imdb.com/chart/boxoffice'
    page = download(url)
    #print(page)

    soup = BeautifulSoup(page, 'html5lib')
    table = soup.find("tbody")
    filmes = table.find_all("tr")
    lista = []
    csv = []
    for filme in filmes:
        nome = filme.find("td", class_="titleColumn").a.text
        rating = filme.find("td", class_="ratingColumn").text
        rating = re.sub(r'[^\d$.M]', '', rating)
        rating_number = re.sub(r'[^\d.]', '', rating)
        gross = filme.find_all("td", class_="ratingColumn")
        gross_span = [
            span.find("span", class_="secondaryInfo") for span in gross
        ]
        gross_value = gross_span[1].text
        weeks = filme.find("td", class_="weeksColumn").text
        lista_csv = [nome, rating, gross_value, weeks]
        csv.append(lista_csv)
        valor = {
            "nome": nome,
            "rating": rating,
            "gross_value": gross_value,
            "weeks": weeks
        }
        lista.append(valor)

    return {"lista": lista, "csv": csv}
Example #12
    def POST(self):
        web.header('Content-Type', 'text/csv')
        web.header('Content-disposition', 'attachment; filename=artifactsCSV.csv')
        kml = web.input()
        kml = urllib.unquote(kml["jsondata"])
        x = json.loads(kml)
        csv = []
        csv.append("Artifact ID, Trench, Description, Fabric, Chronology, X Coordinate, Y Coordinate\n")






        for feature in x["features"]:
            row = []
            row.append(feature["properties"]["catalogid"].replace(",", "-"))
            row.append(feature["properties"]["trench"].replace(",", "-"))
            row.append(feature["properties"]["name"].replace(",", "-"))
            row.append(feature["properties"]["fabric"].replace(",", "-"))
            row.append(feature["properties"]["chronology"].replace(",", "-"))
            row.append(str(feature["geometry"]["coordinates"][0]))
            row.append(str(feature["geometry"]["coordinates"][1]) + "\n")

            csv.append(",".join(row))
        csv = "".join(csv)
        return csv
Example #13
def processa_titulos(session, titulos, operacao):
    csv = []

    for tit in titulos:
        # Skip titles that were not settled
        if tit['Situacao'] != 'Realizado':
            continue

        url = "https://portalinvestidor.tesourodireto.com.br/Protocolo/{}/{}".format(
            tit['CodigoProtocolo'], operacao)
        print("Fetching protocolo={}, url={}".format(tit['CodigoProtocolo'],
                                                     url))
        response = session.get(
            url,
            headers={
                'Referer':
                'https://portalinvestidor.tesourodireto.com.br/Consulta',
            })
        soup = BeautifulSoup(response.content, 'html.parser')

        titulo = soup.find(class_='td-protocolo-info-titulo').text
        quantidade = float(get_info_titulo(soup, 0))
        valor_unitario = str(float(get_info_titulo(soup, 1))).replace('.', ',')
        rentabilidade = str(float(get_info_titulo(soup, 2)) / 100).replace(
            '.', ',')
        liquido = float(get_info_titulo(soup, 3))
        taxa_corretora = str(-float(get_info_titulo(soup, 4))).replace(
            '.', ',')
        taxa_b3 = str(-float(get_info_titulo(soup, 5))).replace('.', ',')
        valor_bruto = float(get_info_titulo(soup, 6))

        if operacao == OPERACAO_VENDA:
            quantidade = -quantidade
            liquido = -liquido
            valor_bruto = -valor_bruto

        quantidade = str(quantidade).replace('.', ',')
        liquido = str(liquido).replace('.', ',')
        valor_bruto = str(valor_bruto).replace('.', ',')

        data_operacao = datetime.datetime.strptime(
            tit["DataOperacao"],
            "%d/%m/%Y")  # uses datetime object for sorting
        line = [
            tit["TipoOperacao"], data_operacao, tit["CodigoProtocolo"], titulo,
            rentabilidade, quantidade, valor_unitario, valor_bruto,
            taxa_corretora, taxa_b3, liquido
        ]

        if IS_DEBUG:
            file = open('protocolos/{}.html'.format(tit['CodigoProtocolo']),
                        "w")
            file.write(str(response.content))
            file.close()
            print(url)
            print(line)

        csv.append(line)

    return csv
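
Example 13 keeps data_operacao as a datetime object precisely so the rows can be sorted before export. A sketch of that final step, assuming the column layout above (the date sits at index 1):

rows = sorted(csv, key=lambda line: line[1])
for line in rows:
    line[1] = line[1].strftime("%d/%m/%Y")  # back to the display format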
Example #14
def scrape_city_and_states(url, csv, limit):
    cities_and_states = []
    i = 0
    limit_count = 0
    for name in scraper(url).find_all('td'):
        z = (name.text).split('[')
        if (not hasNum(z[0])):
            zz = z[0].split('\n')
            if (i == 0):
                x = zz[0]
                i = 1
            else:
                i = 0
                y = zz[0]
                limit_count += 1
                csv.append([
                    str(x.encode('utf-8')) + " " + str(y.encode('utf-8')), 0,
                    0, 0, 0, 0, 0, 0, 0, 0
                ])
                cities_and_states.append(
                    (str(x.encode('utf-8')), str(y.encode('utf-8'))))
                if (limit_count == limit):
                    break
        else:
            continue
    return cities_and_states
Example #15
def files(path):
    csv = []
    extension = "csv"
    files = os.listdir(path)
    for f in files:
        if f.endswith(extension):
            csv.append(f)
    return csv
Example #16
def files(path):
    csv = []
    extension = "csv"
    files = os.listdir(path)
    for f in files:
        if f.endswith(extension):
            csv.append(f)
    return csv
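
Both copies of files() filter os.listdir by suffix. The glob module expresses the same scan in one pass; a sketch that returns bare file names like the original:

import glob
import os

def files(path):
    return [os.path.basename(p) for p in glob.glob(os.path.join(path, "*.csv"))]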
Example #17
def profile(stockname="MSFT"):
    url = 'https://ca.finance.yahoo.com/quote/%s/profile?p=%s&.tsrc=fin-srch' % (
        stockname, stockname)
    obj = requests.get(url)
    souppage = soup(obj.text, "html.parser")
    tableithink = souppage.findAll("span")
    lists = []
    for row in tableithink:
        lists.append(row)
    i = 0
    for i in range(len(lists)):
        lists[i] = str(lists[i]).split(">")
        if (len(lists[i]) == 5):
            lists[i] = lists[i][2].replace("<!-- /react-text --", '').replace(
                "</span", '').replace("amp; ", " ")
        #print(lists[i])
    i = 0
    done = 1
    while i < (len(lists)):
        if ("Mr" or "Ms") in lists[i] and done:
            j = i
            done = 0
            i += 1
        elif (lists[i] == "N/A"):
            lists.pop(i + 1)
            i += 1
        else:
            i += 1

    csv = []

    k = j + 25
    while j < k:
        csv.append(
            [lists[j], lists[j + 1], lists[j + 2], lists[j + 3], lists[j + 4]])
        j += 5
    #print(csv)

    finalcsv = ''
    for x in csv:
        for i in x:
            finalcsv += i.replace(",", " ") + ","
        finalcsv += "\n"
    f = open('profile.csv', 'w')
    f.write(finalcsv)
    f.close()
    print("")
    paraithink = souppage.findAll("p")
    print(paraithink[2].text)
    print("")

    for x in csv:
        print(x)

    print("")
    jsonMaker4()
Example #18
def get_medias_dia(file, csv):
    valores = []
    date = get_date(file)
    dia = date[0]
    table = csv_reader(file)
    acumulado = get_acumulado_dia(table)
    n_corridas = get_numero_corridas_dia(table)
    pon_poff_values = get_pon_poff(table, 29.5, 9.5)
    sucata = get_sucata(table)
    potencia_mw = get_potencia_mw(table)
    potencia_kwh = get_potencia_kwh(table)
    kwh_t = get_acumulado_dia_kwh_t(table)
    kwh_min = get_acumulado_dia_kwh_min(table)
    lan_o2 = get_lan_o2(table)
    carvao = get_carvao(table)
    cj_o2 = get_cj_o2(table)
    cj_gn = get_cj_gn(table)

    index = check_dia(csv, dia)
    if (index == -1):
        valores.append(dia)
        valores.append(acumulado)
        valores.append(n_corridas)
        valores.append(pon_poff_values[0])
        valores.append(pon_poff_values[1])
        valores.append(pon_poff_values[2])
        valores.append(pon_poff_values[3])
        valores.append(sucata)
        valores.append(potencia_mw)
        valores.append(potencia_kwh)
        valores.append(kwh_t)
        valores.append(kwh_min)
        valores.append(lan_o2)
        valores.append(carvao)
        valores.append(cj_o2)
        valores.append(cj_gn)

        csv.append(valores)
    else:
        csv[index][1] = float(csv[index][1]) + float(acumulado)
        csv[index][2] = float(csv[index][2]) + float(n_corridas)
        csv[index][3] = float(csv[index][3]) + float(pon_poff_values[0])
        csv[index][4] = float(csv[index][4]) + float(pon_poff_values[1])
        csv[index][5] = float(csv[index][5]) + float(pon_poff_values[2])
        csv[index][6] = float(csv[index][6]) + float(pon_poff_values[3])
        csv[index][7] = float(csv[index][7]) + float(sucata)
        csv[index][8] = float(csv[index][8]) + float(potencia_mw)
        csv[index][9] = float(csv[index][9]) + float(potencia_kwh)
        csv[index][10] = float(csv[index][10]) + float(kwh_t)
        csv[index][11] = float(csv[index][11]) + float(kwh_min)
        csv[index][12] = float(csv[index][12]) + float(lan_o2)
        csv[index][13] = float(csv[index][13]) + float(carvao)
        csv[index][14] = float(csv[index][14]) + float(cj_o2)
        csv[index][15] = float(csv[index][15]) + float(cj_gn)

    return csv
Example #19
import numpy as np

def load_csv(filename, sep=',', training_indices=(20, -20), runningTest=True):
    csv = []
    with open(filename, 'r') as file:
        for line in file:
            line = line.split(sep)

            if training_indices is not None:
                if runningTest:
                    del line[training_indices[0]:training_indices[1]]
                else:
                    line = line[training_indices[0]:training_indices[1]]

            csv.append([float(j) for j in line])
    return np.array(csv, dtype=np.float64)
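
For purely numeric files, numpy's own loader covers most of load_csv. A sketch under the assumption of no header row ("data.csv" is an illustrative name); np.s_ builds the same column slice the function deletes or keeps:

import numpy as np

data = np.loadtxt("data.csv", delimiter=",", dtype=np.float64)
test_part = np.delete(data, np.s_[20:-20], axis=1)  # like runningTest=True
train_part = data[:, 20:-20]                        # like runningTest=False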
Example #20
def list_csv(input_dir):
    csv = []
    try:
        files = os.listdir(input_dir)
    except OSError:
        return csv

    for i in files:
        if i[0] == '.':
            continue
        if len(i) < 3:
            continue
        if i[-4:] == '.csv':
            csv.append(i)
    return csv
Example #21
def rep_to_csv(rep):
    csv = []
    header = []
    header.append("Branch")
    for key, value in rep.items():
        row = []
        row.append(key)
        for branch_key, branch_value in value.items():
            if header.count(branch_key) == 0:
                header.append(branch_key)
                row.append(branch_value)
            else:
                row.insert(header.index(branch_key), branch_value)
        csv.append(row)
    csv.insert(0, header)
    return csv
Example #22
def format_csv(data):
    features = data[0]['features']
    features = data[0]['features']

    # build header
    header = []
    for feature in features:
        feature.update(feature['properties'])
        if 'Taxonomy' in feature and feature['Taxonomy'] is not None:
            feature.update(feature['Taxonomy'])
            del feature['Taxonomy']
        if feature['geometry'] is not None:
            feature.update({"Longitude": feature['geometry']['coordinates'][0], "Latitude": feature['geometry']['coordinates'][1]})
        del feature['properties']
        del feature['geometry']
        for key in feature:            
            if key not in header:
                header.append(key)
    header.sort()
    log.debug(header)

    # populate rows
    csv = []
    csv.append(','.join(header))
    with open('data.csv', 'w', newline='') as csvfile:
        for feature in features:
            row = []
            for column in header:
                if column in feature:
                    value = feature[column]
                    if type(value) == str:
                        value = strings.singlespace(value)
                        value = value.replace('"', "'")
                    row.append(str(value).replace(",", ""))
                else:
                    row.append("None")
            csv.append(','.join(row))
    return '\n'.join(csv)

    # print(json.dumps(features, indent=4, default=lambda x: str(x)))
Example #23
def financials(stockname="MSFT"):
    url = 'https://ca.finance.yahoo.com/quote/%s/financials' % (stockname)
    obj = requests.get(url)
    souppage = soup(obj.text, "html.parser")
    tableithink = souppage.findAll("span")
    lists = []
    for row in tableithink:
        lists.append(row)
    for i in range(len(lists)):
        lists[i] = str(lists[i]).split(">")[1].replace("</span", "")
        #print(lists[i])
    for i in range(len(lists)):
        if (lists[i] == "Revenue"):
            j = i
        elif lists[i] == "":
            break
    csv = []
    #print(i)
    while j < i:
        if (any(k.isdigit() for k in lists[j + 1])):
            csv.append([
                lists[j], lists[j + 1], lists[j + 2], lists[j + 3],
                lists[j + 4]
            ])
            j += 5
        else:
            csv.append([lists[j], " ", " ", " ", " "])
            j += 1
    finalcsv = ''
    for x in csv:
        for i in x:
            finalcsv += i.replace(",", " ") + ","
        finalcsv += "\n"

    f = open('financials.csv', 'w')
    f.write(finalcsv)
    f.close()
    for x in csv:
        print(x)
    jsonMaker3()
Example #24
def createCsv(tArr):

    header = ["created_at", "views", "addViews", "signatures", "addSignatures"]
    csv = []
    csv.append("created_at,views,addViews,signatures,addSignatures")
    delimiter = ","
    for tEle in tArr:

        line = ""

        for i, key in enumerate(header):

            line += str(tEle[key])

            if i != (len(header) - 1):

                line += delimiter

        csv.append(line)

        #print(line)
    return csv
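
createCsv tracks the delimiter position by index; str.join removes that bookkeeping. An equivalent sketch, assuming every entry in tArr carries all five keys:

def create_csv(tArr):
    header = ["created_at", "views", "addViews", "signatures", "addSignatures"]
    lines = [",".join(header)]
    for tEle in tArr:
        lines.append(",".join(str(tEle[key]) for key in header))
    return lines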
Example #25
def view_att():
    if request.method == "POST":
        query_date = request.args.get('date_time')
        query_date = str(query_date)

        att_final = Update_form.query.filter_by(date_created=query_date).all()
        csv = []

        for i in list(att_final):
            csv.append(str(i).split("-")[1].strip())

        csv = ",".join(csv)

        return Response(csv,
                        mimetype="text/csv",
                        headers={
                            "Content-disposition":
                            "attachment; filename=Attendance-" + query_date +
                            ".csv"
                        })

    return ("Hello World!{}".format(att_final))
Example #26
def main(_):
    parser = ArgumentParser(
        description="This is a small tool for converting a CSV playlist file "
        "generated by e.g. Microsoft Excel to JSON playlist files, "
        "which can be used by AVTrack360. If you want to use this tool for "
        "generating the randomized playlists, "
        "you have to set the parameters directly in the code.")
    parser.add_argument(
        "-csvfile",
        dest="csvfile",
        help="The path of the CSV file you want to convert. Default: dummy.csv",
        type=str)
    arg = parser.parse_args()
    hrcs = 8
    srcs = 8
    pvss = hrcs * srcs
    extension = "mkv"
    projection_scheme = "barrel360"
    hmd = "vive"
    subjects = 32
    csv = ["label;filename;extension;projectionscheme;hmd"]

    if arg.csvfile:
        convert_to_avtrack360_playlist(arg.csvfile)
    else:
        for subject in range(1, subjects + 1):
            csv_subject = []
            for src in range(1, srcs + 1):
                for hrc in range(1, hrcs + 1):
                    csv_subject.append(
                        "%s;SRC%s_HRC%03d;%s;%s;%s" %
                        (subject, src, hrc, extension, projection_scheme, hmd))
            shuffle(csv_subject)
            for element in csv_subject:
                csv.append(element)
        with open('playlists\\generated_playlist.csv', mode='w') as csv_file:
            for element in csv:
                csv_file.write("%s\n" % element)
        convert_to_avtrack360_playlist("generated_playlist.csv")
Example #27
 def GET(self):
     user = accounts.get_current_user()
     username = user.key.split('/')[-1]
     books = Bookshelves.get_users_logged_books(username, limit=10000)
     csv = []
     csv.append('Work Id,Edition Id,Bookshelf\n')
     mapping = {
         1: 'Want to Read',
         2: 'Currently Reading',
         3: 'Already Read'
     }
     for book in books:
         row = [
             'OL{}W'.format(book['work_id']), 'OL{}M'.format(
                 book['edition_id']) if book['edition_id'] else '',
             '{}\n'.format(mapping[book['bookshelf_id']])
         ]
         csv.append(','.join(row))
     web.header('Content-Type', 'text/csv')
     web.header('Content-disposition',
                'attachment; filename=OpenLibrary_ReadingLog.csv')
     csv = ''.join(csv)
     return delegate.RawText(csv, content_type="text/csv")
Example #28
def search(tweet):
    for key in _emojis.keys():
        # print key
        # emojis = _emojis[key]
        # yield emojis
        if key in tweet:
            # print "key: %s, value: %s" % (key, _emojis[key])
            yield _emojis[key]
            print _emojis[key]
        # else: return False

def remove_left(f):
    num = 0
    csv = []
    while True:
        data = f.readline()
        if data == '':
            break
        if (num < 1 or num > 8):
            csv.append(data)
        num += 1
    f.seek(14)
    for row in csv:
        f.write(row)
Example #29
 def _csv_lines(self, cr, uid, ids, context=None):
     csv = []
     for id in ids:
         (invoice_data, invoice_number, partner_id, lines) = self._document(cr, uid, id, context)
         csv.append(invoice_data)
         (partner_data, partner_name) = self._partner(cr, uid, partner_id, context)
         csv.append(partner_data)
         csv.append(self._items(cr, uid, lines, context))
     
     return (csv, invoice_number, partner_name)
Example #30
    def _csv_lines(self, cr, uid, ids, context=None):
        csv = []
        for id in ids:
            (invoice_data, invoice_number, partner_id,
             lines) = self._document(cr, uid, id, context)
            csv.append(invoice_data)
            (partner_data,
             partner_name) = self._partner(cr, uid, partner_id, context)
            csv.append(partner_data)
            csv.append(self._items(cr, uid, lines, context))

        return (csv, invoice_number, partner_name)
Example #31
def xml_converter():
    tree = ET.parse("book_catalog.xml")
    root = tree.getroot()
    csv = []
    for child in root:
        book = ["{0} id={1}".format(child.tag, child.get("id"))]
        csv.append(book)
        row = []
        if (len(csv) == 1):
            csv.append([
                child[0].tag, child[1].tag, child[2].tag, child[3].tag,
                child[4].tag, child[5].tag
            ])
        for sub_child in child:
            row.append(sub_child.text)
        csv.append(row)
    return csv
Example #32
def find_csv():
    csv = []
    for f in os.listdir(os.getcwd()):
        if f.startswith("wikipedia_contributors") and f.endswith(".csv"):
            csv.append(f)
    return csv
Example #33
data = params['data']
d = {}

csvfile = open('test.csv', 'w', newline='')
writer = csv.writer(csvfile)
writer.writerow(['num', 'h', 'd', 'a'])
for key in data.keys():
    csv = []
    d['id'] = data[key]['id']
    d['a'] = float(data[key]['had']['a'])
    d['d'] = float(data[key]['had']['d'])
    d['h'] = float(data[key]['had']['h'])
    d['num'] = data[key]['num']
    d['h_cn'] = data[key]['h_cn']
    d['h_id'] = data[key]['h_id']
    d['a_cn'] = data[key]['a_cn']
    d['a_id'] = data[key]['a_id']
    d['date'] = data[key]['date']
    d['time'] = data[key]['time']
    print(d)
    #db.match.insert(d)
    csv.append(d['num'])
    csv.append(d['h'])
    csv.append(d['d'])
    csv.append(d['a'])
    writer.writerow(csv)
csvfile.close()
# resp = res.decode(encoding='utf-8').split('(')[1].split(')')[0]
# params = json.loads(resp)
# print(resp)
Example #34
    def print_report(self, cr, uid, ids, context=None):
        o = self.browse(cr, uid, ids)[0]
        if context is None:
            context = {}
        
        csv = []
        debit = 0.0; credit = 0.0; cash = 0.0; transfer = 0.0
        
        obj_product = self.pool.get('product.product')
        obj_invoice = self.pool.get('account.invoice')
        obj_order = self.pool.get('sale.order')
        obj_order_line = self.pool.get('sale.order.line')
        obj_pos = self.pool.get('pos.order')
        obj_pos_line = self.pool.get('pos.order.line')
        obj_picking = self.pool.get('stock.picking')
        obj_stock_move = self.pool.get('stock.move')
        obj_move = self.pool.get('stock.order')
        obj_move_line = self.pool.get('account.move.line')
            
        if o.name == 'stock' :
            out = False; inn = False
            no = 0
            for x in o.opname_id.inventory_line_id:
                no += 1
                out = obj_stock_move.search(cr, uid, [
                                                     ('product_id', '=', x.product_id.id),
                                                     ('location_id', '=', o.shop_id.warehouse_id.lot_stock_id.id), 
                                                     ('date', '>=', o.opname_id.date), 
                                                     ('date', '<=', o.dari), 
                                                     ('state', '=', 'done'),
                                                     ('name', 'ilike', 'INV')
                                                     ])
                inn = obj_stock_move.search(cr, uid, [
                                                     ('product_id', '=', x.product_id.id),
                                                     ('location_dest_id', '=', o.shop_id.warehouse_id.lot_stock_id.id), 
                                                     ('date', '>=', o.opname_id.date), 
                                                     ('date', '<=', o.dari), 
                                                     ('state', '=', 'done'),
                                                     ('name', 'ilike', 'INV')
                                                     ])
                
                masuk = sum([x.product_qty for x in obj_stock_move.browse(cr, uid, inn)]) 
                keluar = sum([x.product_qty for x in obj_stock_move.browse(cr, uid, out)])
                total = x.product_qty + masuk - keluar
                 
                csv.append([no, x.product_id.partner_ref, x.product_qty, masuk, keluar, total])
                    
    
        elif o.name == 'shop' or o.name == 'shopdf' :
            pid = obj_pos.search(cr, uid, [('shop_id', '=', o.shop_id.id), ('date_order', '>=', o.dari), ('date_order', '<=', o.dari), ('state', 'not in', ('draft', 'cancel'))])
            pad = obj_pos_line.search(cr, uid, [('order_id','in', pid)])
            sid = obj_order.search(cr, uid, [('shop_id', '=', o.shop_id.id), ('date_order', '>=', o.dari), ('date_order', '<=', o.dari), ('state', 'not in', ('draft', 'cancel', 'shipping_except', 'invoice_except'))])
            
            back = datetime.datetime.strptime(o.dari,'%Y-%m-%d') - datetime.timedelta(days=30)
            bid = obj_order.search(cr, uid, [('shop_id', '=', o.shop_id.id), ('date_order', '>=', str(back)), ('date_order', '<', o.dari), ('state', 'not in', ('draft', 'cancel', 'shipping_except', 'invoice_except'))])
            origin =  [x.name for x in obj_order.browse(cr, uid, bid)]
            yid = obj_invoice.search(cr, uid, [('origin', 'in', origin), ('state', '!=', 'draft')])
            number = [x.number for x in obj_invoice.browse(cr, uid, yid)]
            nid = obj_move_line.search(cr, uid, [('name', 'in', number), ('date', '>=', o.dari), ('date', '<=', o.dari)])
            
            
            if sid:
                amount_total = 0.0; tot_inv = 0.0; payment = 0.0
                for x in obj_order.browse(cr, uid, sid):
                    iid = obj_invoice.search(cr, uid, [('origin', '=', x.name), ('state', '!=', 'draft')])
                    
                    inv = '-'; sisa = 0.0
                    if iid:
                        iad = obj_invoice.browse(cr, uid, iid)[0]
                        inv = iad.number; sisa = iad.residual
                    else:
                        sisa = '-'
                    
                    amount_total += x.amount_total
                    csv.append([
                                x.name,
                                x.shop_id.name,
                                [i.product_id.partner_ref for i in x.order_line],
                                sum([i.product_uom_qty for i in x.order_line]),
                                '-',
                                x.partner_id.name, 
                                x.amount_total,
                                inv,
                                sisa,
                                '-', '-', '-', '-'
                    ])
                    
                    if iid:
                        for i in iad.payment_ids:
                            payment += i.credit
                            if i.journal_id.jenis == "db":
                                debit += i.credit
                            elif i.journal_id.jenis == "cr":
                                credit += i.credit 
                            elif i.journal_id.jenis == "cash":
                                cash += i.credit 
                            elif i.journal_id.jenis == "transfer":
                                transfer += i.credit
                            csv.append([
                                        '-', '-', '-', '-', '-', time.strftime('%d %B %Y', time.strptime(i.date,'%Y-%m-%d')), '-', '-', '-',
                                        i.ref, i.date, i.credit, i.setor
                            ])
                            
                
                if nid :
                    csv.append(['-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-'])
                    for i in obj_move_line.browse(cr, uid, nid):
                        so = '-'
                        cari = obj_invoice.search(cr, uid, [('number', '=', i.name)])
                        dpt = obj_invoice.browse(cr, uid, cari)
                        if dpt:
                            so = dpt[0].origin
            
                        payment += i.credit
                        if i.journal_id.jenis == "db":
                            debit += i.credit
                        elif i.journal_id.jenis == "cr":
                            credit += i.credit 
                        elif i.journal_id.jenis == "cash":
                            cash += i.credit 
                        elif i.journal_id.jenis == "transfer":
                            transfer += i.credit
                        csv.append([
                                    so, '-', 'Pelunasan Invoice ' + i.name, '-', '-', time.strftime('%d %B %Y', time.strptime(i.date,'%Y-%m-%d')), '-', '-', '-',
                                    i.ref, i.date, i.credit, i.setor
                        ])

                csv.append(['-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-'])
                csv.append([
                            'Total Sales Order',
                            '-',
                            '-',
                            '-',
                            '-',
                            '-', 
                            amount_total,
                            '-',
                            0.0,
                            '-', '-', payment, '-'
                ])
                csv.append(['-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-'])
                    
            if pad:
                pos = ''; outlet = '-'; cake = ''; harga = 0
                if o.name == 'shopdf':
                    for a in obj_pos.browse(cr, uid, pid):
                        for i in a.statement_ids: 
                            if i.journal_id.jenis == "db":
                                debit += i.amount
                            elif i.journal_id.jenis == "cr":
                                credit += i.amount 
                            elif i.journal_id.jenis == "cash":
                                cash += i.amount 
                            elif i.journal_id.jenis == "transfer":
                                transfer += i.amount
                             
                    for x in obj_pos_line.browse(cr, uid, pad):
                        pos = 'Rekapan POS'
                        outlet = x.order_id.shop_id.name
                        cake = 'Rekapan Product'
                        harga += x.price_subtotal
                        csv.append([
                                    x.order_id.name, 
                                    outlet,
                                    x.product_id.partner_ref,
                                    x.qty,
                                    '-', '-', '-', '-', '-', '-', '-', x.price_subtotal, '-'
                        ])
                         
                    csv.append([
                                pos,
                                outlet,
                                cake,
                                '-',
                                '-', '-', 0, '-', 0, '-', '-', harga, '-'
                        ])
                else: 
                    for x in obj_pos_line.browse(cr, uid, pad):
                        pos += x.order_id.name + '/'
                        outlet = x.order_id.shop_id.name
                        cake += x.product_id.partner_ref + '/'
                        harga += x.price_unit
                    csv.append([
                                pos, 
                                outlet,
                                cake,
                                '-',
                                harga,
                                '-', '-', '-', '-', '-', '-', '-', '-'
                    ])  

        
        data = self.read(cr, uid, ids)[0]
        datas = {'ids': [data['id']]}
        datas['model'] = 'sale.varian.report'
        datas['form'] = data
        datas['csv'] = csv
        datas['debit'] = debit
        datas['credit'] = credit
        datas['cash'] = cash
        datas['transfer'] = transfer
         
         
        title = 'sale.varian'
        if data['name'] == 'shop':
            title = 'wtc.shop.excel'
        elif data['name'] == 'shopdf':
            title = 'wtc.shop.pdf'
        elif data['name'] == 'stock':
            title = 'sale.stock.pdf'
         
         
        return {
            'type': 'ir.actions.report.xml',
            'report_name': title,
            'nodestroy': True,
            'datas': datas,
        }
Example #35
def target2csv_exp(t):
  #print "[DEBUG] Processing target {}".format(t['id'])
  ttdls = {}
  if 'tdl_infos' in t:
    ttdls = t['tdl_infos']
  p = t['components']['protein'][0]
  ptdls = p['tdl_infos']
  if not p['dtoid']: p['dtoid'] = ''
  if not p['dtoclass']: p['dtoclass'] = ''
  if t['idg']:
    idg = 1
  else:
    idg = 0
  csv = [ t['id'], p['name'], p['description'], p['sym'], p['geneid'], p['uniprot'], p['stringid'], t['tdl'], idg, p['dtoid'], p['dtoclass'] ]
  if 'panther_classes' in p:
    csv.append( '|'.join(["%s:%s"%(d['pcid'],d['name']) for d in p['panther_classes']]) )
  else:
    csv.append('')
  if 'generifs' in p:
    csv.append( len(p['generifs']) )
  else:
    csv.append(0)
  if 'NCBI Gene PubMed Count' in ptdls:
    csv.append( ptdls['NCBI Gene PubMed Count']['value'] )
  else:
    csv.append(0)
  if 'JensenLab PubMed Score' in ptdls:
    csv.append( ptdls['JensenLab PubMed Score']['value'] )
  else:
    csv.append(0)  
  if 'PubTator Score' in ptdls:
    csv.append( ptdls['PubTator Score']['value'] )
  else:
    csv.append(0)
  csv.append( ptdls['Ab Count']['value'] )
  csv.append( ptdls['MAb Count']['value'] )
  # Activities
  if 'cmpd_activities' in t:
    csv.append( len(t['cmpd_activities']) )
  else:
    csv.append(0)
    #csv.append('')
  # ChEMBL
  if 'ChEMBL Selective Compound' in ttdls:
    csv.append( ttdls['ChEMBL Selective Compound']['value'] )
  else:
    csv.append('')
  if 'ChEMBL First Reference Year' in ttdls:
    csv.append( ttdls['ChEMBL First Reference Year']['value'] )
  else:
    csv.append('')
  # DrugCentral
  if 'drug_activities' in t:
    csv.append( len(t['drug_activities']) )
  else:
    csv.append(0)
    #csv.append('')
  # PDB
  if 'PDB' in p['xrefs']:
    pdbs = [d['value'] for d in p['xrefs']['PDB']]
    csv.append( len(pdbs) )
    csv.append( "|".join(pdbs) )
  else:
    csv.append(0)
    csv.append('')
  # GO
  if 'goas' in p:
    csv.append( len(p['goas']) )
  else:
    csv.append(0)
  if 'Experimental MF/BP Leaf Term GOA' in ptdls:
    csv.append( ptdls['Experimental MF/BP Leaf Term GOA']['value'] )
  else:
    csv.append(0)
  # Phenotypes
  if 'phenotypes' in p:
    omims = [d['trait'] for d in p['phenotypes'] if d['ptype'] == 'OMIM']
    if len(omims) > 0:
      csv.append( len(omims) )
      csv.append( "|".join(omims) )
    else:
      csv.append('')
      csv.append('')
    jaxs = ["%s:%s"%(d['term_id'],d['term_name']) for d in p['phenotypes'] if d['ptype'] == 'JAX/MGI Human Ortholog Phenotype']
    if jaxs:
      csv.append( len(jaxs) )
      csv.append( '|'.join(jaxs) )
    else:
      csv.append('')
      csv.append('')
  else:
    csv.append('')
    csv.append('')
    csv.append('')
    csv.append('')
  # IMPC phenotypes
  if 'impcs' in p:
    pts = ["%s:%s"%(d['term_id'],d['term_name']) for d in p['impcs']]
    csv.append( len(pts) )
    csv.append( '|'.join(pts) )
  else:
    csv.append('')
    csv.append('')
  # GWAS
  if 'gwases' in p:
    gwases = ["%s (%s):%s"%(d['disease_trait'],d['mapped_trait_uri'],d['p_value']) for d in p['gwases']]
    csv.append( len(gwases) )
    csv.append( '|'.join(gwases) )
  else:
    csv.append('')
    csv.append('')
  # Pathways
  if 'pathways' in p:
    pathways = ["%s:%s"%(d['pwtype'],d['name']) for d in p['pathways']]
    csv.append( len(pathways) )
    csv.append( "|".join(pathways) )
  else:
    csv.append('')
    csv.append('')
  # Diseases
  if 'diseases' in p:
    uniq = set( [d['name'] for d in p['diseases']] )
    csv.append( len(uniq) )
    # Top text-mining diseases
    tmdiseases = ["%s (ZScore: %s)"%(d['name'],str(d['zscore'])) for d in p['diseases'] if d['dtype'] == 'JensenLab Text Mining']
    if len(tmdiseases) > 0:
      csv.append( "|".join(tmdiseases[:5]) ) # Only top 5
    else:
      csv.append('')
    # eRAM diseases
    erams = [d for d in p['diseases'] if d['dtype'] == 'eRAM']
    if len(erams) > 0:
      csv.append( "|".join(["%s: %s"%(d['did'],d['name']) for d in erams]) )
    else:
      csv.append('')
  else:
    csv.append('')
    csv.append('')
    csv.append('')
  # Patent Count
  if 'EBI Total Patent Count' in ptdls:
    csv.append( ptdls['EBI Total Patent Count']['value'] )
  else:
    csv.append(0)
  # Is TF
  if 'Is Transcription Factor' in ptdls:
    csv.append(1)
  else:
    csv.append(0)
  if 'TMHMM Prediction' in ptdls:
    m = re.search(r'PredHel=(\d)', ptdls['TMHMM Prediction']['value'])
    if m:
      csv.append(m.groups()[0])
    else:
      csv.append(0)
  else:
    csv.append(0)
  # Tissue specificity
  if 'HPA Tissue Specificity Index' in ptdls:
    csv.append(ptdls['HPA Tissue Specificity Index']['value'])
  else:
    csv.append('')
  if 'HPM Gene Tissue Specificity Index' in ptdls:
    csv.append(ptdls['HPM Gene Tissue Specificity Index']['value'])
  else:
    csv.append('')
  if 'HPM Protein Tissue Specificity Index' in ptdls:
    csv.append(ptdls['HPM Protein Tissue Specificity Index']['value'])
  else:
    csv.append('')
  # TIN-X
  if 'tinx_novelty' in p:
    csv.append(p['tinx_novelty'])
  else:
    csv.append('')
  if 'tinx_importances' in p:
    # these come back ordered by score DESC. Only output top 5
    txis = ["%s: %s"%(d['disease'],str(d['score'])) for d in p['tinx_importances'][:5]]
    csv.append( "|".join(txis) )
  else:
    csv.append('')
  
  return csv
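
Most of target2csv_exp repeats one shape: append the stored value if a key exists, otherwise a default. A small hypothetical helper, assuming the same dict-of-dicts layout as ptdls/ttdls above, collapses those branches:

def append_info(csv, infos, key, default=0):
    # mirrors the repeated `if key in infos: ... else: ...` blocks
    csv.append(infos[key]['value'] if key in infos else default)

append_info(csv, ptdls, 'NCBI Gene PubMed Count')
append_info(csv, ptdls, 'JensenLab PubMed Score')
append_info(csv, ttdls, 'ChEMBL First Reference Year', default='')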
Example #36
   def process_qsub_attributes():
     rawAttributes = self.nodePoolDesc.getAttrs()
 
     # 'W:x' is used to specify torque management extensions, i.e. -W x= ...
     resourceManagementExtensions = ''
     if 'W:x' in rawAttributes:
       resourceManagementExtensions = rawAttributes['W:x']
 
     if qosLevel:
       if len(resourceManagementExtensions) > 0:
         resourceManagementExtensions += ';'
       resourceManagementExtensions += 'QOS:%s' % (qosLevel)
 
     rawAttributes['W:x'] = resourceManagementExtensions
     
     hostname = local_fqdn()
 
     rawAttributes['l:nodes'] = nodeSet._getNumNodes()
     
     if walltime:
       rawAttributes['l:walltime'] = walltime
     
     #create a dict of dictionaries for 
     # various arguments of torque
     cmds = {}
     for key in rawAttributes:
       value = rawAttributes[key]
 
       if key.find(':') == -1:
         raise ValueError, 'Syntax error: missing colon after %s in %s=%s' % (
           key, key, value)
 
       [option, subOption] = key.split(':', 1)
       if not option in cmds:
         cmds[option] = {}
       cmds[option][subOption] = value
     
     opts = []
      #create a string from this
      #dictionary of dictionaries created above
     for k in cmds:
       csv = []
       nv = cmds[k]
       for n in nv:
         v = nv[n]
         if len(n) == 0:
           csv.append(v)
         else:
           csv.append('%s=%s' % (n, v))
       opts.append('-%s' % (k))
       opts.append(','.join(csv))
 
     return opts
Example #37
    line = re.sub('#\d\\^', "", line.rstrip())
    line = re.sub('(^style=.+)|-|<sup>...</sup>|\\(|\\)', "", line.rstrip())
    line = re.sub(' , ', " ", line.rstrip())
    lines.append(line)

num_lines = len(lines)
i = 0

outputFile = open("output.csv", 'wb')
wr = csv.writer(outputFile)

while (not re.match('[A-Z]{3}', lines[i])):
    i += 1

csv = []
csv.append(["country", "year", "placing"])

while (i < num_lines and re.match('[A-Z]{3}', lines[i])):
    country = lines[i].rstrip()
    x = [1, 2, 3, 4]
    for placing in x:
        l2 = lines[i + placing]
        if (not re.match('align=centersort dash', l2)):
            for year in l2.split(" "):
                if len(year) > 1:
                    csv.append([country, year, str(placing)])
                    #csv.append(country + ", " + year + ", " + str(placing))
    i += 5

    while (i < num_lines and not re.match('[A-Z]{3}', lines[i])):
        i += 1
Example #38
def PyPoll(data):
    #Initialize some variables
    total_votes = 0
    candidate_voted_for = []

    csv = []
    for row in data:
        csv.append(row)

    for row in csv:
        total_votes += 1
        candidate_voted_for.append(row[2])
        #candidate = csv[2]

    #Pull unique elements from the list of candidates voted for
    unique_candidates = set(candidate_voted_for)
    #convert set back to a list
    unique_candidates = list(unique_candidates)

    #Initialize my list of candidate counts based on the index of unique_candidates
    #Start the count at 0 for each candidate, once, before tallying
    candidate_count = [0 for candidate in unique_candidates]
    for row in csv:
        for i in range(len(unique_candidates)):
            if row[2] == unique_candidates[i]:
                #add to that index in the candidate_count
                candidate_count[i] += 1

    candidate_1 = unique_candidates[0]
    candidate_2 = unique_candidates[1]
    candidate_3 = unique_candidates[2]
    candidate_4 = unique_candidates[3]

    candidate_1_count = candidate_count[0]
    candidate_2_count = candidate_count[1]
    candidate_3_count = candidate_count[2]
    candidate_4_count = candidate_count[3]

    #percentage votes each candidate won
    #candidate_percentage = []
    # winner = []
    # for i in range(len(unique_candidates)): # or range(len(candidate_count))
    #     #candidate_percentage[i].append(round((int(candidate_count[i])/int(total_votes[i]))*100,2))   #IndexError: list index out of range
    #     #who had the most votes?
    #     if i == 0:
    #         winner = unique_candidates[i]
    #     else: #i = 1,2,3
    #     #elif i == 1 or i == 2 or i == 3:
    #         if candidate_count[i] > winner:
    #             winner = unique_candidates[i]

    #percentages of votes
    candidate_1_per = round((int(candidate_1_count) / int(total_votes)) * 100,
                            1)
    candidate_2_per = round((int(candidate_2_count) / int(total_votes)) * 100,
                            1)
    candidate_3_per = round((int(candidate_3_count) / int(total_votes)) * 100,
                            1)
    candidate_4_per = round((int(candidate_4_count) / int(total_votes)) * 100,
                            1)

    #Determine the winner
    winner = []
    #Candidate 1 winner scenario:
    if (candidate_1_count > candidate_2_count) and (
            candidate_1_count > candidate_3_count) and (candidate_1_count >
                                                        candidate_4_count):
        #Is there a way to write this like "if candidate_1_count > (candidate_2_count and candidate_3_count and candidate_4_count)"?
        winner = candidate_1
    #Candidate 2 winner scenario:
    elif (candidate_2_count > candidate_1_count) and (
            candidate_2_count > candidate_3_count) and (candidate_2_count >
                                                        candidate_4_count):
        winner = candidate_2
    #Candidate 3 winner scenario:
    elif (candidate_3_count > candidate_2_count) and (
            candidate_3_count > candidate_1_count) and (candidate_3_count >
                                                        candidate_4_count):
        winner = candidate_3
    #Candidate 4 winner scenario:
    elif (candidate_4_count > candidate_1_count) and (
            candidate_4_count > candidate_2_count) and (candidate_4_count >
                                                        candidate_3_count):
        winner = candidate_4

    return [
        total_votes, candidate_1, candidate_1_count, candidate_1_per,
        candidate_2, candidate_2_count, candidate_2_per, candidate_3,
        candidate_3_count, candidate_3_per, candidate_4, candidate_4_count,
        candidate_4_per, winner
    ]
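
The hand-rolled tallying in PyPoll is what collections.Counter was built for. A compact sketch of the same summary, assuming the candidate name is in column 2 of each row:

from collections import Counter

def poll_summary(rows):
    votes = Counter(row[2] for row in rows)
    total = sum(votes.values())
    winner, _ = votes.most_common(1)[0]
    percents = {name: round(100.0 * n / total, 1) for name, n in votes.items()}
    return total, winner, percents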
Example #39
	line = re.sub('(^style=.+)|-|<sup>...</sup>|\\(|\\)', "", line.rstrip())
	line = re.sub(' , ', " ", line.rstrip())
	lines.append(line)


num_lines = len(lines)
i = 0

outputFile = open("output.csv", 'wb')
wr = csv.writer(outputFile)

while(not re.match('[A-Z]{3}', lines[i])):
	i += 1

csv = []
csv.append(["country", "year", "placing"])

while(i < num_lines and re.match('[A-Z]{3}', lines[i])):
	country = lines[i].rstrip()
	x = [1,2,3,4]
	for placing in x:
		l2 = lines[i+placing]
		if(not re.match('align=centersort dash', l2)):
			for year in l2.split(" "):
				if len(year) > 1:
					csv.append([country, year, str(placing)])
					#csv.append(country + ", " + year + ", " + str(placing))
	i += 5

	while(i < num_lines and not re.match('[A-Z]{3}', lines[i])):
		i += 1
Example #40
        if current_temperature == 9999:
            lcd.top("Temperature")
            lcd.bottom("Failed to read")
            lcd.cleanup()
            sys.exit(0)

        probe_minute_01.append(current_temperature)

        lcd.top("{:2.1f}".format(current_temperature) + chr(223) + "C  " + current_time.strftime("%H:%M:%S"))  # chr(223) is the degree sign in the HD44780 LCD character set

        if last_minute != current_minute:  # once per minute: refresh the LCD and log a sample
            lcd.display_init()
            probe_minute_15.append(current_temperature)
            probes_minute_30.append(current_temperature)
            probes_minute_60.append(current_temperature)
            # one CSV row per minute; decimal comma for spreadsheet locales
            csv.append(current_time.strftime("%s") + ";" + str(current_time) + ";" + "{:2.1f}".format(
                current_temperature).replace('.', ',') + "\n")

        lcd.bottom("{:2.1f}".format(probes_minute_60.average) + chr(223) + " " + "{:2.1f}".format(
            probes_minute_30.average) + chr(223) + " " + "{:2.1f}".format(probe_minute_15.average) + chr(223))

        time.sleep(2)

        last_minute = current_minute
        last_time = current_time

    except KeyboardInterrupt:
        lcd.cleanup()
        sys.exit(0)
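The probe_minute_* objects are defined outside this excerpt; judging by the calls above they expose append() and an average attribute. A minimal sketch of such a rolling buffer, assuming a fixed-length window (the class name and semantics are assumptions, not the original implementation):

from collections import deque

class RollingAverage:
    def __init__(self, maxlen):
        self._values = deque(maxlen=maxlen)  # oldest samples drop off automatically

    def append(self, value):
        self._values.append(value)

    @property
    def average(self):
        return sum(self._values) / len(self._values) if self._values else 0.0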
Exemplo n.º 41
0
     msg = master.recv_msg()
 except KeyboardInterrupt:
     break
 if msg is not None:
     msg_type = msg.get_type()
     if msg_type == "MISSION_COUNT":
         mission_count = msg.count  # reply to the initial MISSION_REQUEST_LIST
         if mission_count > 0:
             master.mav.mission_request_int_send(0, 0, 0, mission_type)
         else:
             print("no mission")
     elif msg_type == "MISSION_ITEM_INT":
         if msg.seq == expect_seq:
             print("recv mission item", msg.seq)
             csv.append(
                 str(msg.seq) + "," + str(msg.current) + "," +
                 str(msg.frame) + "," + str(msg.command) + "," +
                 str(msg.param1) + "," + str(msg.param2) + "," +
                 str(msg.param3) + "," + str(msg.param4) + "," +
                 str(msg.x) + "," + str(msg.y) + "," + str(msg.z) + "," +
                 str(msg.autocontinue) + "\n")
             expect_seq = expect_seq + 1
             if expect_seq < mission_count:
                 master.mav.mission_request_int_send(
                     0, 0, expect_seq, mission_type)  # request the next item
             else:
                 master.mav.mission_ack_send(0, 0, 0, mission_type)  # acknowledge: transfer complete
                 print("done")
                 with open(file_name, 'w') as out_file:
                     out_file.writelines(csv)
                 break
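The loop above assumes a pymavlink `master` connection, a `mission_type`, and the bookkeeping variables already exist. A minimal setup sketch under those assumptions (the connection string, CSV header, and file name are hypothetical, not from the original):

from pymavlink import mavutil

master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')  # hypothetical endpoint
master.wait_heartbeat()  # block until the vehicle is talking to us

mission_type = 0   # MAV_MISSION_TYPE_MISSION
expect_seq = 0
mission_count = 0
file_name = 'mission.csv'  # hypothetical output path
csv = ["seq,current,frame,command,param1,param2,param3,param4,x,y,z,autocontinue\n"]

# kick off the transfer; the loop above handles MISSION_COUNT / MISSION_ITEM_INT
master.mav.mission_request_list_send(0, 0, mission_type)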
Exemplo n.º 42
0
    def _process_metrics_for_csv(self, csv, metric_results_dict, batch, attn_list, layer_1_entropy, input_seq_entropy, scores):
        batch = [x.cpu().detach().numpy() for x in batch]
        scores = scores.cpu().detach().numpy()
        users = None
        if len(batch) == 4:
            seqs, candidates, labels, users = batch
        elif len(batch) == 3:
            seqs, candidates, labels = batch

        row = 0
        if users is not None:
            assert users.shape[0] == 1  # this path expects batch size 1
            internal_user_id = users[row][0]
        else:
            internal_user_id = -1  # no user ids in a 3-tuple batch
        new_row = [internal_user_id]
        new_row += [metric_results_dict['NDCG@%d' % k] for k in self.metric_ks]
        new_row += [metric_results_dict['Recall@%d' % k] for k in self.metric_ks]
        new_row += self._map_internal_movie_list_to_original([int(candidates[0][0])])

        attn_layer_1 = attn_list[0][0]
        attn_layer_2 = attn_list[1][0]

        top_left_coord_to_keep = (attn_layer_1[0] == 0).sum()  # presumably the count of padded positions
        new_row += [200 - top_left_coord_to_keep]  # true sequence length (200 looks like the model's max length)

        attn_layer_1 = attn_layer_1[top_left_coord_to_keep:, top_left_coord_to_keep:]
        attn_layer_2 = attn_layer_2[top_left_coord_to_keep:, top_left_coord_to_keep:]

        csv.append(new_row)

        minmax = {
            325: [0.002, 0.03],
            639: [0.018, 0.087],
            616: [0.017, 0.083],
            500: [0.005, 0.044],
            127: [0.015, 0.085],
            115: [0.003, 0.045],
            187: [0.004, 0.045],
            59: [0.004, 0.045],
            627: [0.008, 0.058],
            1094: [0.01, 0.059],

            880: [0.002, 0.029],
            973: [0.005, 0.045],
            1906: [0.017, 0.082],
            1968: [0.001, 0.029],

            226 : [0.013, 0.072],
            490: [0.0175, 0.08],
            1807: [0.012, 0.07],
        }

        minmax_inp_seq = {
            325: [0.0025, 0.018],
            639: [0.021, 0.065],
            616: [0.025, 0.065],
            500: [0.01, 0.038],
            127: [0.02, 0.065],
            115: [0.005, 0.035],
            187: [0.0075, 0.034],
            59: [0.0075, 0.037],
            627: [0.0011, 0.038],
            1094: [0.015, 0.045],

            880: [0.003, 0.0225],
            973: [0.01, 0.039],
            1906: [0.02, 0.06],
            1968: [0.003, 0.025],

            226 : [0.015, 0.057],
            490: [0.02, 0.062],
            1807: [0.013, 0.054],}

        # minmax = {k:[None, None] for k,v in minmax.items()}
        # minmax_inp_seq = {k: [None, None] for k, v in minmax_inp_seq.items()}

        input_item_attn_projection = self._project_attention_on_input(attn_layer_1, attn_layer_2)

        # Shannon entropy of each attention row (query position), averaged over rows
        l1_entr = np.average((-attn_layer_1*np.log2(attn_layer_1)).sum(axis=1))
        layer_1_entropy.append(l1_entr)
        # entropy of the attention mass projected back onto the input sequence
        inp_entr = (-input_item_attn_projection*np.log2(input_item_attn_projection)).sum(axis=1)[0]
        input_seq_entropy.append(inp_entr)

        # Early return: the plotting code below is currently disabled. Swap in the
        # commented guard to plot only the users listed in minmax:
        # if internal_user_id not in minmax:
        #     return csv, layer_1_entropy, input_seq_entropy
        return csv, layer_1_entropy, input_seq_entropy

        temp_name = 'core_'
        root_dump = os.path.join('Images', 'AttentionTemp', str(internal_user_id))
        # root_dump = os.path.join(self.export_root, 'logs', 'attention', str(internal_user_id))
        Path(root_dump).mkdir(parents=True, exist_ok=True)

        rank = (-scores).argsort(axis=1)
        top10 = candidates[0][rank[0][:10]]

        input_target_dict = {'target': self._map_internal_movie_list_to_original([int(candidates[0][0])]),
                             'predicted': self._map_internal_movie_list_to_original(top10.tolist()),
                             'input_projected_attn': input_item_attn_projection[0].tolist(),
                             'input': self._map_internal_movie_list_to_original([x for x in seqs[row].tolist() if x != 0])}

        with open(os.path.join(root_dump, temp_name+'input_target.json'), 'w') as f:
            json.dump(input_target_dict, f, indent=4)

        vmin, vmax = minmax_inp_seq[internal_user_id]  # renamed: don't shadow the min/max builtins
        # vmin, vmax = None, None
        fig, ax = plt.subplots()
        im = ax.imshow(input_item_attn_projection, cmap='coolwarm', interpolation=None, vmin=vmin, vmax=vmax)
        cbar = ax.figure.colorbar(im, ax=ax)
        cbar.ax.set_ylabel('Attention Weight')
        plt.xlabel('Input Positions')
        plt.yticks([], None)
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        fig.savefig(os.path.join(root_dump, temp_name+'input_proj.png'), bbox_inches='tight')
        fig.clf()
        plt.close()

        vmin, vmax = minmax[internal_user_id]  # renamed: don't shadow the min/max builtins
        # vmin, vmax = None, None
        fig, ax = plt.subplots()
        im = ax.imshow(attn_layer_1, cmap='coolwarm', interpolation=None, vmin=vmin, vmax=vmax)
        cbar = ax.figure.colorbar(im, ax=ax)
        cbar.ax.set_ylabel('Attention Weight')
        plt.xlabel('Key Positions')
        plt.ylabel('Query Positions')
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        fig.savefig(os.path.join(root_dump, temp_name+'layer1.png'), bbox_inches='tight')
        fig.clf()
        plt.close()

        fig2, ax2 = plt.subplots()
        im2 = ax2.imshow(attn_layer_2, cmap='coolwarm', interpolation=None)
        cbar2 = ax2.figure.colorbar(im2, ax=ax2)
        cbar2.ax.set_ylabel('Attention Weight')
        plt.xlabel('Key Positions')
        plt.ylabel('Query Positions')
        ax2.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax2.xaxis.set_major_locator(MaxNLocator(integer=True))
        fig2.savefig(os.path.join(root_dump, temp_name+'layer2.png'), bbox_inches='tight')
        fig2.clf()
        plt.close()

        return csv, layer_1_entropy, input_seq_entropy
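The helper _project_attention_on_input is not shown in this excerpt. A plausible sketch under an attention-rollout-style assumption (chain the two layers' attention matrices and keep the final query row); this is a guess at the shape the caller expects, not the author's implementation:

import numpy as np

def project_attention_on_input(attn_layer_1, attn_layer_2):
    # Chain layer-2 attention through layer-1 so the final position
    # attributes weight to each input item (attention-rollout style; assumed).
    projected = attn_layer_2 @ attn_layer_1   # (L, L) @ (L, L) -> (L, L)
    last_row = projected[-1:, :]              # (1, L): attribution of the last query
    return last_row / last_row.sum()          # renormalize to a distribution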
Exemplo n.º 43
0
import scipy
import csv

import numpy as np

subdirs = [
    'Sirene', 'Auto', 'Flugzeug', 'PartyBabble', 'Straße', 'Waschmaschine'
]  ## noise subdirs
db = [20, 15, 10, 5, 0, -5]  ## all SNRs
csv = []  # NOTE: this list shadows the csv module imported above
NET_TYPE = "cnn_oned_60"

#read in .csv files with evaluation metrics:
for h in range(len(subdirs)):  # one metrics file per noise type
    csv.append(
        np.genfromtxt('metrics' + NET_TYPE + subdirs[h] +
                      'aprioSNR_mean-30.csv',
                      delimiter=","))

#which metrics shall be plotted (1 = plot, 0 = skip):

PESQANDSTOI = 1
SNR15 = 0
POSTGAIN = 0
LDSPLOT = 0

# convert all metrics into one 3D-Array
csv = np.array(csv)

if PESQANDSTOI == 1:
    """
    Plots STOI and PESQ Metrics for all noise types at all SNRs   
Exemplo n.º 44
0
def dados_cnae(cnae):

    # url = "http://www.fieb.org.br/guia/Resultado_Consulta?CodCnae=C&NomeAtividade=IND%c3%9aSTRIAS%20DE%20TRANSFORMA%c3%87%c3%83O.&operProduto=and&localizacao=&ordenacao=ind_razao_social&page=0&consulta=Consultas%20Especiais"
    p = urllib.parse.urlencode({
        'CodCnae': cnae[0],
        'NomeAtividade': cnae[1],
        'operProduto': 'and',
        'localizacao': '',
        'ordenacao': 'ind_razao_social',
        'page': 0,
        'consulta': 'Consultas Especiais'
    })
    url = "http://www.fieb.org.br/guia/Resultado_Consulta.aspx?%s" % p
    urlemp = []
    csv = []
    i = 1
    pagina = requests.get(url)
    html = BeautifulSoup(pagina.content, 'lxml')

    # GET THE NUMBER OF RESULT PAGES FOR THE QUERY
    NUM_PAGINA = html.find(
        id="ContentPlaceHolder1_generalContent_rpt_lblLastPage").text
    URL_EMPRESA = html.find_all(
        id="label-consulta-3")  #COMPANY PROFILE LINKS ON PAGE 1
    for alink in URL_EMPRESA:
        urlemp.append(alink.a.get('href'))
    for lnk in urlemp:
        url = "http://www.fieb.org.br/guia/" + lnk
        pagina = requests.get(url)
        html = BeautifulSoup(pagina.content, 'lxml')
        dados_emp = limpa_dados(list(html.find(id="divDadosIndustria")))

        #print('Fetching page links')
        if dados_emp is not None and dados_emp is not False:
            csv.append(u.parse_csv(dados_emp))

    if int(NUM_PAGINA) > 1:
        urlemp.clear()

        while int(NUM_PAGINA) > i:  # PAGINATE THROUGH THE REMAINING RESULT PAGES

            p = urllib.parse.urlencode({
                'CodCnae': cnae[0],  # was `cnae` (the whole tuple) -- a bug on paginated requests
                'NomeAtividade': cnae[1],
                'operProduto': 'and',
                'localizacao': '',
                'ordenacao': 'ind_razao_social',
                'page': i,
                'consulta': 'Consultas Especiais'
            })
            url = "http://www.fieb.org.br/guia/Resultado_Consulta.aspx?%s" % p

            pagina = requests.get(url)
            html = BeautifulSoup(pagina.content, 'lxml')
            total_emp = html.find_all(id="label-consulta-3")
            for alink in total_emp:
                urlemp.append(alink.a.get('href'))
            c = 1

            for lnk in urlemp:
                url = "http://www.fieb.org.br/guia/" + lnk
                pagina = requests.get(url)
                html = BeautifulSoup(pagina.content, 'lxml')
                dados_emp = limpa_dados(list(
                    html.find(id="divDadosIndustria")))

                if dados_emp == False:
                    print(url)
                    print(html)
                    exit()
                #if parse_csv(dados_emp) != None:
                csv.append(u.parse_csv(dados_emp))
                c += 1
            urlemp.clear()
            i += 1
        # GET THE COMPANY LINKS FROM THE CURRENT PAGE
    print('DADOS COLETADOS =', i)  # "data collected"; i here counts the pages processed
    export_csv(csv, cnae)
    csv.clear()
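The helpers limpa_dados, u.parse_csv, and export_csv live elsewhere; a minimal sketch of what export_csv presumably does (the file-name scheme and delimiter are assumptions):

import csv as csvmod  # aliased because the caller reuses the name `csv`

def export_csv(rows, cnae):
    # Hypothetical writer for the rows collected above; one file per CNAE code.
    out_name = 'empresas_%s.csv' % cnae[0]
    with open(out_name, 'w', newline='', encoding='utf-8') as f:
        writer = csvmod.writer(f, delimiter=';')
        writer.writerows(rows)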