def read_excel(val_id):
    # Assumes pandas is imported as pd; FileUpload and User are Django models
    # defined elsewhere in the project.
    file = FileUpload.objects.get(id=val_id)
    val = pd.read_excel(file.name.path)
    # print(archivo_excel.columns)
    # values = archivo_excel['id'].values
    # for index, row in re.iterrows():
    #     print(row["name"], row["age"])
    for index, row in val.iterrows():
        user = User.objects.create_user(
            id=row['id'],
            last_login=row['last_login'],
            is_superuser=row['is_superuser'],
            username=row['username'],
            first_name=row['first_name'],
            last_name=row['last_name'],
            email=row['email'],
            is_staff=row['is_staff'],
            is_active=row['is_active'],
            date_joined=row['date_joined'])  # create the user and password
        # columnas = ['id', 'last_login', 'is_superuser', 'username', 'first_name',
        #             'last_name', 'email', 'is_staff', 'is_active', 'date_joined']
        # user.is_superuser = True  # superuser permissions
        # user.is_staff = True  # whether the user is part of the staff
        user.save()  # save the data
def run(self):
    global my_vehicle
    while self.steps <= self.end_time:
        # print('updating state thread')
        pd = LoadPartialData(self.steps, my_vehicle.id)
        for index, row in pd.iterrows():
            if int(row['id']) == my_vehicle.id:
                my_vehicle.set_speed(row['speed'])
                my_vehicle.set_location(row['x'], row['y'])
                # print('location of car ', my_vehicle.id, ' is (', my_vehicle.get_location())
                data = "{\"carid\":" + str(row['id']) + ",\"seq\":" + str(self.count) + \
                       ",\"timestamp\":\"" + str(int(time.time() * 1000)) + \
                       "\",\"longitude\":" + str(row['x']) + \
                       ",\"latitude\":" + str(row['y']) + \
                       ",\"speed\":" + str(row['speed']) + \
                       ",\"angle\":" + str(row['angle']) + \
                       ",\"type\":" + str(row['type']) + \
                       "}"
                producer.send(TOPIC, data)
        time.sleep(0.1)
        self.steps += 0.1
        self.count += 1
def main():
    ReadDataFromFile('vehicleGeoData.csv')
    # print(X)
    end_time = 200
    steps = 0.0
    count = 0
    # global sock
    # sock.listen(1)
    # connection, client_address = sock.accept()
    while steps <= end_time:
        # print('step : ', steps, 'time : ', int(time.time() * 1000))
        pd = LoadPartialData(steps)
        for index, row in pd.iterrows():
            data = "{\"carid\":" + str(row['id']) + ",\"seq\":" + str(
                count) + ",\"timestamp\":\"" + str(int(
                    time.time() * 1000)) + "\",\"longitude\":" + str(
                        row['x']) + ",\"latitude\":" + str(
                            row['y']) + ",\"speed\":" + str(row['speed']) + "}"
            if row['id'] < 5:
                print(data)
            # print(row['time'], row['id'], row['x'], row['y'])
            # print('time : ', int(time.time() * 1000))
            # connection.sendall(data)
            if row['id'] < 5:
                producer.send('TextLinesTopic', data)
        time.sleep(0.1)
        count = count + 1
        steps = steps + 0.1
def run(self):
    global producer, TOPIC
    print('car ', car_id, ' started broadcasting data!')
    while self.steps <= self.end_time:
        # print('start thread')
        pd = LoadPartialData(self.steps)
        for index, row in pd.iterrows():
            data = "{\"carid\":" + str(row['id']) + ",\"seq\":" + str(
                self.count) + ",\"timestamp\":\"" + str(
                    int(time.time() * 1000)) + "\",\"longitude\":" + str(
                        row['x']) + ",\"latitude\":" + str(
                            row['y']) + ",\"speed\":" + str(
                                row['speed']) + "}"
            # print(data)
            # producer.send(TOPIC, data)
            cs = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
            cs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            cs.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
            cs.sendto(data.encode(), ('', 4499))  # sendto requires bytes on Python 3
        time.sleep(0.1)
        self.steps += 0.1
        self.count += 1
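# Illustrative sketch (not from the original snippets): the telemetry payload
# that the loops above build by string concatenation can also be produced with
# json.dumps, which handles quoting and escaping. Field names mirror the
# snippets; the sample row and seq values below are invented for the demo.
import json
import time

row = {'id': 3, 'x': -122.41, 'y': 37.77, 'speed': 12.5}
data = json.dumps({
    'carid': int(row['id']),
    'seq': 0,
    'timestamp': str(int(time.time() * 1000)),  # kept as a string, as above
    'longitude': row['x'],
    'latitude': row['y'],
    'speed': row['speed'],
})
print(data)  # e.g. {"carid": 3, "seq": 0, "timestamp": "...", "longitude": -122.41, ...}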
def binary_n(pd):
    rs = pd.copy()
    for row in pd.iterrows():
        row = row[1]
        rows = binary(row)
        if rows is None:
            continue
        rs = rs.append(rows)  # accumulate the rows produced by binary()
    print(len(rs))
    print(len(pd))
    return rs
def pd_to_sql(pd, table):
    def get_colums(pd):
        return " (" + ", ".join(str(key) for key in pd.keys()) + ") "

    sql_list = []
    columns = get_colums(pd)
    for indice_fila, fila in pd.iterrows():
        # print(fila)
        sql = "INSERT INTO " + table + columns + "VALUES("
        for attr in fila:
            if isinstance(attr, str):
                sql += "'" + str(attr) + "', "
            else:
                sql += str(attr) + ", "
        sql = sql[:-2] + ");"
        sql_list.append(sql)
    return "\n".join(sql_list)
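# Illustrative usage of pd_to_sql (not from the original code): given a small
# DataFrame it emits one INSERT statement per row, quoting only string values.
# The DataFrame and table name below are invented for the demo.
import pandas as pd

df = pd.DataFrame({'id': [1, 2], 'name': ['ana', 'bob']})
print(pd_to_sql(df, 'users'))
# INSERT INTO users (id, name) VALUES(1, 'ana');
# INSERT INTO users (id, name) VALUES(2, 'bob');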
    output_file.writelines('\n')
    print("Count:", count)
    count += 1
    return count, not_found_count


if __name__ == '__main__':
    count = 1
    not_found_count = 0
    output_file = open(os.path.join(HOME_DIR, 'data.csv'), 'a')
    output_file2 = open(os.path.join(HOME_DIR, 'wiki.csv'), 'w')
    output_file.writelines('\n')
    output_file.writelines('\n')
    output_file.writelines('\n')
    pd = get_reading_urls_from_file(os.path.join(HOME_DIR, 'urls_list.xlsx'))
    for row in pd.iterrows():
        country_name = row[1][0]
        if country_name not in ["Italy", "France", "Spain", "Greece", "Hungary",
                                "germany", "portugal", "austria", "belgium",
                                "luxembourgc"]:
            url = row[1][3]
            count, not_found_count = scrap_url(country_name, url, count,
                                               output_file, output_file2,
                                               not_found_count)
    print("Not found count: ", not_found_count)