forked from loogica/copa_transparente
-
Notifications
You must be signed in to change notification settings - Fork 0
/
execucao_licitacao.py
144 lines (116 loc) · 4.3 KB
/
execucao_licitacao.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
# coding: utf-8
import csv
import decimal
import datetime
import math
import os
import pickle
import sys
import tempfile
import json
import humanize
from copa_transparente.query import select
# Load the pre-built table graph produced by an earlier pipeline step.
# NOTE(review): pickle.load is only safe on trusted files — never point
# this at data downloaded from an untrusted source.
with open("grafo1.pickle", "rb") as in_file:
    grafo = pickle.load(in_file)
def fix_line(line, max_rows):
    """Repair a CSV row whose quoted fields were split in two.

    A cell ending in a double quote is assumed to have been broken by
    the quoting; the following cell is merged back into it. At most
    *max_rows* cells are kept (the table's column count).

    :param line: list of cell strings for one CSV record (mutated in place
        when cells are merged).
    :param max_rows: maximum number of cells to return.
    :return: new list with at most *max_rows* repaired cells.
    """
    ret = []
    for i, data in enumerate(line):
        if i >= max_rows:
            break
        # Merge the next cell into this one when it ends in a stray quote.
        # The i + 1 < len(line) guard fixes an IndexError the original
        # raised when the very last cell ended with '"'.
        if data.endswith('"') and i + 1 < len(line):
            new_data = line.pop(i + 1)
            data = "".join([data, new_data])
        ret.append(data)
    return ret
def extrai_table_name(meta_name):
    """Split a metadata file name into its underscore-separated parts."""
    parts = meta_name.split("_")
    return parts
def process():
    """Load the newest ExecucaoFinanceira CSV dump into the table graph,
    report parse statistics and spending totals, and write the aggregated
    figures to data.json plus a dated snapshot under hist/.
    """
    table_name = "ExecucaoFinanceira"
    files = os.listdir('banco/data')
    data_file_path = None
    for f in files:
        fixex_table_name = extrai_table_name(f)[1]
        if fixex_table_name.startswith(table_name):
            data_file_path = os.path.join('banco/data', f)
    if not data_file_path:
        print('Nome errado de tabela')
        # Bug fix: the original fell through here and crashed with a
        # TypeError on os.path.basename(None).
        return
    data_file = os.path.basename(data_file_path)
    meta_name = data_file.replace(".csv", "")
    table_name = extrai_table_name(meta_name)[1]
    # Strip embedded CRLF pairs so multi-line records collapse into one
    # physical line per record.
    with open(data_file_path, "r", encoding="utf-8") as text_file, \
            open("temp.csv", "wt") as new_file:
        while True:
            data = text_file.read(4096)
            if not data:
                break
            data = data.replace("\r\n", "")
            new_file.write(data)
    # NOTE(review): a "\r\n" split across the 4096-char chunk boundary is
    # not removed — behavior preserved from the original implementation.
    with open("temp.csv", "r", encoding="utf-8") as counter:
        original_lines = sum(1 for _ in counter)
    print("Dados de {0}".format(table_name))
    table = grafo[table_name]
    valid_lines = 0
    with open("temp.csv", "rt") as new_file:
        reader = csv.reader(new_file, delimiter=';', quotechar='"',
                            skipinitialspace=True)
        for line in reader:
            if len(line) == 1:
                # A lone cell is a stray fragment, not a record.
                continue
            line = fix_line(line, len(table._columns))
            try:
                table.add_data(line)
                valid_lines += 1
            except Exception:
                # Best-effort load: report the failure and keep going.
                print("Erro na linha {}".format(valid_lines))
    missing_lines = original_lines - valid_lines
    # Guard added: avoid ZeroDivisionError on an empty input file.
    error = float(missing_lines / original_lines) if original_lines else 0.0
    error_str = "{:.2%}".format(error)
    print("Total: {}".format(original_lines))
    print("Parsed: {}".format(valid_lines))
    print("Error Rate: {:.2%}".format(error))
    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato",
                   "IdInstituicaoContratado")._from(table)
    line_count = sum(1 for _ in query)
    print("Total de registros analisados : {} ({} perdidos)".format(line_count, missing_lines))
    # Each query is consumed once, so a fresh select() is built per total.
    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato")._from(table)
    total_com_licitacao = sum(map(lambda x: x[2], filter(lambda x: x[1], query)))
    print("Execução Financeira com referência para Licitação: {}".format(
        total_com_licitacao))
    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato")._from(table)
    total_sem_licitacao = sum(map(lambda x: x[2], filter(lambda x: not x[1], query)))
    print("Execução Financeira sem referência para Licitação: {}".format(
        total_sem_licitacao))
    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato")._from(table)
    total = sum(map(lambda x: x[2], query))
    print("Total dos gastos: {}".format(total))
    date_str = datetime.datetime.now().strftime("%d/%m/%Y")
    data = {
        'd_total': str(total),
        'd_total_sem_ref_lic': str(total_sem_licitacao),
        'd_total_com_ref_lic': str(total_com_licitacao),
        'total': humanize.intword(total).replace("billion", "bilhões"),
        'total_sem_ref_lic': humanize.intword(total_sem_licitacao).replace("billion", "bilhões"),
        'total_com_ref_lic': humanize.intword(total_com_licitacao).replace("billion", "bilhões"),
        'percentual_dados_desconsiderados': error_str,
        'atualizado': date_str
    }
    with open("data.json", "wt") as out:
        out.write(json.dumps(data))
    with open("hist/data_{}.json".format(datetime.datetime.now().strftime("%Y%m%d")), "wt") as out:
        out.write(json.dumps(data))
    print(data)
# Run the full import/report pipeline when executed as a script.
if __name__ == "__main__":
    process()