コード例 #1
0
def word_repost_relationship(temp_dir, searchList, breakpos=None):
    """Crawl the repost relationships for every weibo id in *searchList*.

    Parameters
    ----------
    temp_dir : str
        Output directory (expected to end with a path separator).
    searchList : list
        Weibo ids whose repost trees should be crawled.
    breakpos : dict, optional
        Checkpoint info ({'repost_file': ..., 'center_bw_id': ...}) used to
        resume an interrupted crawl; the first id of *searchList* is assumed
        to be the interrupted one and is skipped after being finished.
    """
    # Name the logger after the worker pid so parallel workers log separately.
    name = 'getRepost_' + str(os.getpid())
    logger = getLogger(name)
    # Per-process temporary directory holding the per-level repost files.
    level_dir = temp_dir + 'temp/'
    os.mkdir(level_dir)

    try:
        if not breakpos:
            # Fresh run: create a new output file.
            repost_file = temp_dir + name + '.csv'
            repost_writer = csvWriter(repost_file, repost=True)
        else:
            # Resume: reopen the checkpointed file, finish the interrupted id
            # first, then continue with the remaining ids as usual.
            repost_file = temp_dir + breakpos['repost_file']
            repost_writer = csvWriter(repost_file, repost=True, breakpos=True)
            get_repost_relationship(breakpos['center_bw_id'], repost_writer,
                                    level_dir, logger, breakpos)
            searchList = searchList[1:]

        logger.info('Start getting repost...')  # fixed typo: was 'Strat'
        for bw_id in searchList:  # renamed from `id`, which shadowed a builtin
            get_repost_relationship(bw_id, repost_writer, level_dir, logger)
        logger.info('Finish!')
    finally:
        # Remove the temporary directory even if crawling raised, so a failed
        # run does not leak level_dir on disk.
        shutil.rmtree(level_dir)
コード例 #2
0
def app(event_loop):
    """Program entry point: parse arguments and run the service until it
    finishes or the user interrupts it with Ctrl-C."""
    log = getLogger('main')
    args_parser = argument_parser()

    try:
        prepare_service(log, event_loop, args_parser)
    except KeyboardInterrupt:
        log.info('service end, reason: keyboard interrupt')
コード例 #3
0
def word_spider(searchlist):
    """Endlessly crawl weibo search results for a list of keywords.

    Each round: for every keyword, save the matching weibos and their repost
    relationships, and collect related topics; the collected topics then
    become the next round's keyword list.
    """
    # Load configuration: output directories for the three result kinds.
    cfg = load_config()
    hot_dir = cfg['hot_dir']
    topic_dir = cfg['topic_dir']
    repost_dir = cfg['repost_dir']
    # One logger per worker process, named after the process.
    proc_name = multiprocessing.current_process().name
    logger = getLogger(proc_name)
    topic_dir += proc_name + '_'
    # Number of times the keyword list has been (re)loaded.
    epoch = 1

    while True:
        # Crawl related weibos and their repost relationships per keyword.
        for wd in searchlist:
            logger.info(f'EPOCH: {epoch}. Keyword: {wd}. Start crawling ...')
            search_file = hot_dir + 'search_result_' + str(wd) + '.csv'
            repost_file = repost_dir + 'repost_Relationship_' + str(wd) + '.csv'
            # The writers create their target files on construction.
            search_writer = csvWriter(search_file, search=True)
            repost_writer = csvWriter(repost_file, repost=True)

            # Fetch all weibos matching the keyword (capped around 1000).
            get_query_info(wd, search_writer, logger)

            # Crawl the repost relationships of every matched weibo id.
            for bw_id in search_writer.get_idList():
                get_repost_relationship(bw_id, repost_writer, logger)

            repost_writer.drop_duplicates()

            # Collect topics related to this keyword for the next round.
            get_more_topic(wd, epoch, topic_dir, logger)

        # Round finished: the collected topics become the new keyword list.
        topics_path = topic_dir + 'Topics_' + str(epoch) + '.csv'
        with open(topics_path, 'r', encoding='utf-8-sig') as fh:
            searchlist = [row[0].strip() for row in csv.reader(fh)]

        # The topics file is only an intermediate artifact.
        os.remove(topics_path)

        epoch += 1
コード例 #4
0
ファイル: common_utils.py プロジェクト: wilsonZWS/ETLDL
 def read(self, file_dir, file_name):
     """Load this object's attributes from the JSON file at file_dir/file_name.

     If the file exists, its JSON content replaces ``self.__dict__`` wholesale
     and True is returned. If it does not exist, the current object is
     serialized out via ``JSONUtils.write`` to create it and False is
     returned. Any error is printed and False is returned (best effort).
     """
     log = logger.getLogger()
     # os.path.join handles dirs with or without a trailing separator; the
     # previous plain concatenation silently produced "dirfile" without one.
     file_path = os.path.join(file_dir, file_name)
     try:
         if os.path.exists(file_path):
             with open(file_path, 'r', encoding='utf-8') as f:
                 meta = json.loads(f.read())
                 # Replace all attributes with the deserialized mapping.
                 self.__dict__ = meta
             log.debug('loading %s' % file_path)
             return True
         else:
             log.debug('No {} file found. Creating a new one.'.format(file_name))
             JSONUtils.write(self, file_dir, file_name)
             return False
     except Exception as e:
         # Deliberately broad: reading metadata is best effort; report and
         # fall back to False rather than crash the caller.
         traceback.print_exc()
         print("read a JSON file to path error: %s, Case: %s" % (file_path, e))
         return False
コード例 #5
0
import os
import copy
import time
import pickle
import xml.etree.ElementTree as ET
from os.path import expanduser

import rospy
import tf

from config import ROS_Launch_File, Template_Turtlebot_Launch, Template_Rosbot_Launch, Map_Dir, Launch_Max_Try, Nav_Process_Pool, Robot_Model, Pos_Value_Splitter
from utils.ros_utils import checkRobotNode, shell_open, initROSNode

#from utils.logger import logger
from utils.logger import getLogger
logger = getLogger('Turtlebot_Launcher')
logger.propagate = False


class Turtlebot_Launcher():
    def __init__(self, inspection_id, siteid, robots):
        # Robots taking part in this inspection run.
        self.robots = robots
        # Site whose map/launch configuration will be used.
        self.siteid = siteid
        self.inspection_id = inspection_id
        # tf transform listener; starts unset — presumably created during
        # launch/navigation setup (TODO confirm, creation not shown here).
        self.trans_listener = None

    def launch(self):

        launched = False
        for i in range(Launch_Max_Try):
            logger.info(
コード例 #6
0
ファイル: validator.py プロジェクト: GbrickPlatform/gbrick
 def logger(self):
     """Return the 'node' logger, creating and caching it on first access."""
     cached = self._logger
     if cached is None:
         cached = getLogger('node')
         self._logger = cached
     return cached
コード例 #7
0
# -*- coding: utf-8 -*-
""" 
@author: xingxingzaixian
@create: 2020/9/6
@description: 
"""
from django.contrib.auth import backends
from django.db.models import Q
from django.contrib.auth.backends import UserModel
from rest_framework.exceptions import AuthenticationFailed

from utils.auth.jwt_util import JwtUtil
from utils.logger import getLogger


logger = getLogger('auth')
class UserBackend(backends.ModelBackend):
    """Authentication backend that accepts either a username or a telephone
    number as the login identifier."""

    def authenticate(self, request, username=None, password=None, **kwargs):
        """Return the matched user with a fresh JWT attached as ``user.token``,
        or raise AuthenticationFailed when lookup/password check fails."""
        try:
            user = UserModel.objects.get(Q(username=username) | Q(telephone=username))
            if user.check_password(password):
                token = JwtUtil.gen_jwt_token(user)
                user.token = token
                return user
        except UserModel.DoesNotExist as e:
            logger.error(f'[username={username}]: {e}')
        except UserModel.MultipleObjectsReturned as e:
            # One account's username colliding with another's telephone would
            # otherwise escape .get() as an unhandled 500-level error.
            logger.error(f'[username={username}]: multiple accounts matched: {e}')
        raise AuthenticationFailed(detail="User authentication failure")
コード例 #8
0
import requests
from discord import Embed, Message
from discord.ext import commands
from requests import Session

from bot.cog import BaseCog
from utils.config import Config, ConfigData
from utils.googledrive import GoogleDrive as Drive
from utils.googledrive import GoogleDriveFile
from utils.logger import getLogger

drive: Drive = Drive()
config: ConfigData = Config.read()

r: Session = requests.Session()
# Fix: `logger: Logger = ...` raised NameError at import time because `Logger`
# was never imported; the (eagerly evaluated) annotation is simply dropped.
logger = getLogger(__name__)


class CmdFanart(BaseCog):
    @commands.group()
    async def fanart(self, ctx):
        # Group root command: when called without a subcommand, just tell the
        # user one is required. (Comment, not docstring, on purpose — a
        # docstring would become the command's help text in discord.py.)
        if ctx.invoked_subcommand is None:
            await ctx.send('subcommand is required')

    @fanart.command()
    async def list(self, ctx):
        logger.info("fetch VTuberFanartCrawler image count")
        embed = Embed(title="Fanart Image Count",
                      description="fetch image. please wait for while...",
                      color=0x80fffd)
コード例 #9
0
import copy
import json
import time

import redis
from kafka import KafkaProducer

import config
from utils.logger import getLogger

logger = getLogger('utils-kafka')
logger.propagate = False

# Kafka producer for publishing status updates; payloads are JSON-encoded and
# gzip-compressed on the wire.
status_producer = KafkaProducer(
    bootstrap_servers=config.Kafka_Brokers,
    compression_type='gzip',
    value_serializer=lambda x: json.dumps(x).encode())

# Redis connection (db 0) — created eagerly at import time.
redis_connector = redis.Redis(host=config.redis_host,
                              port=config.redis_port,
                              db=0)

# just an example, you don't have to follow this format
task_status_payload = {
    "inspection_id": 3,
    "task_type": 0,
    "site_id": "site01",
    'timestamp': 1599033481,
    "robot": {
        "robot_id": "robot03",
        "checkpoint_no": 2,
コード例 #10
0
ファイル: tsdb.py プロジェクト: wdxpz/robottaskcontroller
import copy
import datetime
from influxdb import InfluxDBClient

import config
from utils.logger import getLogger

logger = getLogger('utils-tsdb')

# Template for an InfluxDB point recording a robot's position. All values are
# placeholders (0) — presumably callers copy this template and fill it in
# before writing (the writer code is not visible here; confirm).
body_pos = {
    'measurement': config.Table_Name_Robot_Pos,
    'time': 0,
    'tags': {
        'robot_id': 0,
        'inspection_id': 0,
        'site_id': 0,
    },
    'fields': {
        'pos_x': 0,
        'pos_y': 0,
        'pos_angle': 0
    }
}

body_event = {
    'measurement': config.Table_Name_Robot_Event,
    'time': 0,
    'tags': {
        'robot_id': 0,
        'inspection_id': 0,
        'site_id': 0,
コード例 #11
0
import redis
from kafka import KafkaProducer
import json

from utils.logger import getLogger
logger = getLogger('test_redis_kafka')
logger.propagate = False

robot_id = 'tb3_0'
task_type = 30
site_id = 'bj02'
inspection_id = 562


def test_redis_service():
    #redis
    redis_host = "123.1127.237.146"
    redis_port = "6379"

    robot_position_payload = {
        "timestamp": 1599033481,
        "robot_id": 'hello_a',
        "inspection_id": 0,
        "site_id": 0,
        "location": '0-0-0'
    }
    redis_connector = redis.Redis(host=redis_host, port=redis_port, db=0)
    try:
        redis_connector.hmset(0, robot_position_payload)

        logger.info('Redis operation : send robot pos {}#{}'.format(10, 2))
コード例 #12
0
import torch
import torch.optim as optim
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import tensorboard_logger as tb

import my_optim
from envs import create_atari_env
from model import ActorCritic
from train import train
from test import test
from utils import logger
from utils.shared_memory import SharedCounter

logger = logger.getLogger('main')

# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
parser = argparse.ArgumentParser(description='A3C')
parser.add_argument('--lr',
                    type=float,
                    default=0.0001,
                    metavar='LR',
                    help='learning rate (default: 0.0001)')
parser.add_argument('--gamma',
                    type=float,
                    default=0.99,
                    metavar='G',
                    help='discount factor for rewards (default: 0.99)')
コード例 #13
0
ファイル: train.py プロジェクト: richard403/autotext
# export PYTHONPATH="/home/whx/workspace/work/python_code/autotext:{$PYTHONPATH}"
import tensorflow as tf
import numpy as np
from datamanager import dataload
from rnn.model import RNN
from config import SEQ_LEN
import argparse
import os
from utils import dateUtil
from utils import fileUtil
from utils import logger
from utils import configParse
from rnn import predict

_LOG = logger.getLogger('rnn_train')


def saveModel(model, saveModelDir, signatures=None):
    """Export *model* as a TF SavedModel into an hour-stamped subdirectory.

    Args:
        model: the TensorFlow model/module to export.
        saveModelDir: base export directory; the function is a no-op when None.
        signatures: optional dict of serving signatures forwarded to
            ``tf.saved_model.save``.
    """
    if saveModelDir is None:
        return
    # Version each export by hour so repeated saves do not overwrite.
    tagDir = os.path.join(saveModelDir, dateUtil.getNow(format='%Y%m%d%H'))
    tagDir = fileUtil.getDir(tagDir, op=2)  # op=2 semantics per fileUtil — confirm
    # isinstance replaces the brittle `type(signatures) == dict` so dict
    # subclasses (e.g. OrderedDict) are honored as well.
    if isinstance(signatures, dict):
        tf.saved_model.save(model, tagDir, signatures)
    else:
        tf.saved_model.save(model, tagDir)


def loadModel(loadModelDir, flag):
    tagDir = os.path.join(loadModelDir, flag)
コード例 #14
0
def word_get_query_info(wd, writer):
    """Crawl the search results for a single keyword *wd* into *writer*."""
    log = getLogger('getQuery')
    log.info(f'Keyword: {wd}. Start crawling ...')
    get_query_info(wd, writer, log)
コード例 #15
0
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''

# TurtleBot must have minimal.launch & amcl_demo.launch
# running prior to starting this script
# For simulation: launch gazebo world & amcl_demo prior to run this script

import rospy
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, Point, Quaternion

from config import Wait_For_GoToPose_Time, DEBUG
from utils.logger import getLogger
logger = getLogger('GoToPose')


class GoToPose():
    def __init__(self, inspection_id, robot_id):

        self.inspection_id = inspection_id
        self.robot_id = robot_id
        self.msg_head = 'inspection:{} robot: {}: '.format(
            inspection_id, robot_id)

        self.goal_sent = False

        # What to do if shut down (e.g. Ctrl-C or failure)
        rospy.on_shutdown(self.shutdown)
コード例 #16
0
ファイル: zhuyuan.py プロジェクト: huazhicai/zheyi
import re
import time
import random

import requests
from lxml import etree
from datetime import datetime

from config.base_config import user_agent
from utils.util import filter_data
from utils.logger import getLogger
from data_structure.singleton_structure_content import new_content

logger = getLogger('zhuyuan')

headers = {'User-Agent': random.choice(user_agent)}


def xueyansuo(blh):
    url = 'http://192.168.2.8:8055/Report/SearchReport'
    json_data = {
        "bah": blh,
        "startTime": "2007-01-01",
        "endTime": datetime.now().strftime('%Y-%m-%d'),
        "Types": ["XYS", "PS", "SZB", "LIS-RST"],
    }
    resp = requests.post(url, json=json_data)
    if len(resp.text) > 10:
        doc = etree.HTML(resp.text)
        result = doc.xpath('//tr')
        xys = []
コード例 #17
0
import time
import math

import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion

from utils.logger import getLogger
logger = getLogger('RotateController')
logger.propagate = False

PI = 3.1415926535897

class RotateController():
    def __init__(self, inspection_id, robot_id):

        

        self.inspection_id = inspection_id
        self.robot_id = robot_id
        self.msg_head = 'inspection:{} robot: {}: '.format(inspection_id,robot_id)
        # self.sub = rospy.Subscriber ('/{}/odom'.format(self.robot_id), Odometry, self.get_rotation)
        self.rotate_pub = rospy.Publisher('/{}/cmd_vel'.format(self.robot_id), Twist, queue_size=1)
        self.rotate_command =Twist()
        self.roll = 0.0
        self.pitch = 0.0
        self.yaw = 0.0
        self.ctrl_c = False
        self.rate = rospy.Rate(10)
コード例 #18
0
 def logger(self):
     """Return the 'storage' logger, creating and caching it on first access."""
     cached = self._logger
     if cached is None:
         cached = getLogger('storage')
         self._logger = cached
     return cached
コード例 #19
0
ファイル: main.py プロジェクト: wdxpz/robottaskcontroller
import threading
import time
import json

from kafka import KafkaConsumer

from config import Task_Type, Kafka_Brokers, Task_Topic
from utils.ros_utils import killNavProcess, initROSNode
from tasks.inspection import execInspection

from utils.logger import getLogger
logger = getLogger('main')
logger.propagate = False

# Kafka consumer delivering inspection task messages to this controller.
task_subscriber = KafkaConsumer(
    bootstrap_servers=Kafka_Brokers,
    group_id="robot_controller2",
    auto_offset_reset="earliest")

# def execTaskLoop():
    
#     while True:
#         task = getTasksFromMsgQueue()
#         if task is None:
#             time.sleep(1)
#             continue
#         task_type, task_data = task[0], task[1]
#         if task_type == Task_Type["Task_Inspection"]:   
#             inspection_id = int(task_data['inspection_id'])
#             task_name = 'inpsection: {}'.format(inspection_id)
#             logger.info('start inspection task: {}'.format(task_name))
#             task = threading.Thread(name=task_name, target=execInspection, args=(task_data,))
コード例 #20
0
from utils.logger import getLogger
logger = getLogger('InspectionMonitor')
logger.propagate = False

from config import Inspection_Status_Codes

Worker_Status = {'working': 0, 'idle': 1, 'failed': 3}


class InspectionMonitor(object):
    """
    InspectionMonitor only records the status of on-going inspection task,
    which also records the workers (robots)' three statuses: 'working', 'idle' and 'failed',
    but does not record the robot's entrance and leave events
    """

    inspection_monitor = None

    def __init__(self):
        # inspection_id -> task record (site id, robot statuses, ...);
        # populated by addTask.
        self.task_list = {}

    @staticmethod
    def getInspectionMonitor():
        """Return the process-wide InspectionMonitor, creating it lazily."""
        instance = InspectionMonitor.inspection_monitor
        if instance is None:
            instance = InspectionMonitor()
            InspectionMonitor.inspection_monitor = instance
        return instance

    def addTask(self, inspection_id, site_id, robot_ids):
        self.task_list[inspection_id] = {
            'site_id': site_id,
コード例 #21
0
ファイル: engine.py プロジェクト: seraphln/chat2all
from os.path import dirname
from os.path import join

import tornado.ioloop
import tornado.web

from models import conn
from utils.config import config
from utils.logger import getLogger

from views.IndexHandler import IndexHandler
from views.LoginHandler import DirectLoginHandler
from views.LoginHandler import LogoutHandler
from views.LoginHandler import QQLoginHandler


logger = getLogger("chat2all.engine")


def make_app():
    """ make an app instance to start the server """
    settings = {'static_path': join(dirname(__file__), 'static'),
                'template_path': join(dirname(__file__), 'templates')}

    cookie_secret = config.get('cookie_secret')

    app = tornado.web.Application([(r"/qq_redirect/", QQLoginHandler),
                                   (r"/login/", DirectLoginHandler),
                                   (r"/logout/", LogoutHandler),
                                   (r"/", IndexHandler)],
                                   cookie_secret=cookie_secret,
                                   **settings)
コード例 #22
0
 def logger(self):
     """Return the 'statedb' logger, creating and caching it on first access."""
     cached = self._logger
     if cached is None:
         cached = getLogger('statedb')
         self._logger = cached
     return cached
コード例 #23
0
ファイル: a3c.py プロジェクト: scientist1642/bombora
import math
import os
import sys

import torch
import torch.nn.functional as F
import torch.optim as optim

from torch.autograd import Variable
from torchvision import datasets, transforms
from utils import logger

logger = logger.getLogger(__name__)


def ensure_shared_grads(model, shared_model):
    """Point the shared model's gradients at the worker model's gradients.

    Bails out as soon as any shared parameter already has a gradient — the
    assignment only needs to happen once per (worker, shared) pairing.
    """
    pairs = zip(model.parameters(), shared_model.parameters())
    for local_param, shared_param in pairs:
        if shared_param.grad is not None:
            return
        shared_param._grad = local_param.grad


def train(rank,
          args,
          shared_model,
          Model,
          make_env,
          gl_step_count,
          optimizer=None):
    torch.manual_seed(args.seed + rank)
コード例 #24
0
from django.utils.deprecation import MiddlewareMixin

from utils.logger import getLogger

logger = getLogger('middle')


class ExceptionMiddleware(MiddlewareMixin):
    """Django middleware that logs every unhandled view exception."""

    def process_exception(self, request, exception):
        """
        Log the exception together with the request path. Returning None
        (implicitly) lets Django's default exception handling continue.
        """
        logger.error(f'URL=[{request.path}], {exception}')
コード例 #25
0
# 在 requests headers 中,禁用/删除 If-Modified-Since 和 If-None-Match 这两项
import time

import requests
from jsonpath import jsonpath
from utils.decrypt_data import decrypt_data
from utils.logger import getLogger
from utils.crack_password import crack_pwd
from utils.util import filter_data, quchong

logger = getLogger('new_his')

# HIS account credentials (redacted in this copy of the source).
USER = '******'
PASSWORD = '******'
# Session state filled in by login(): the OAuth access token plus two
# server-supplied values K1/K2 — presumably key material for decrypt_data;
# confirm against login()'s response handling.
ACCESS_TOKEN = None
K1 = None
K2 = None


def login():
    global ACCESS_TOKEN, K1, K2
    url = 'http://his.zheyi.com/app-sso/oauth/token'
    params = {
        'grant_type': 'password',
        'username': USER,
        'password': crack_pwd(PASSWORD),
        'verifyCode': '',
        'sessionId': '',
    }
    basic_authorize = {
        'Authorization': 'Basic bm9idWc6Z2l2ZW1lZml2ZQ==',
コード例 #26
0
ファイル: test.py プロジェクト: Joaoloula/pytorch-a3c
import sys
import time

import torch
import torch.nn.functional as F
import torch.optim as optim
import tensorboard_logger as tb

from envs import create_atari_env
from model import ActorCritic
from torch.autograd import Variable
from torchvision import datasets, transforms
from collections import deque
from utils import logger

logger = logger.getLogger('test')


def test(rank, args, shared_model, gl_step_cnt):
    torch.manual_seed(args.seed + rank)

    env = create_atari_env(args.env_name)
    env.seed(args.seed + rank)

    model = ActorCritic(env.observation_space.shape[0], env.action_space)

    model.eval()

    state = env.reset()
    state = torch.from_numpy(state)
    reward_sum = 0
コード例 #27
0
# -*- coding:utf-8-*-
from threading import Thread

from utils.logger import getLogger

logger = getLogger('utils')


def quchong(array):
    """Drop duplicate dicts from *array*, keeping first occurrences in order.

    Dicts are fingerprinted by their (key, value) item tuples, so values must
    be hashable; two dicts with the same items in a different insertion order
    count as distinct.
    """
    fingerprints = set()
    unique = []
    for item in array:
        mark = tuple(item.items())
        if mark in fingerprints:
            continue
        fingerprints.add(mark)
        unique.append(item)
    return unique


def capital_to_lower(doc):
    """Return a copy of the dict *doc* with every key lower-cased."""
    assert isinstance(doc, dict)
    return {key.lower(): value for key, value in doc.items()}


def filter_data(ybzl, obj_data):
    # assert len(obj_data) == 5

    if isinstance(ybzl, dict) and ybzl.get(20104):
コード例 #28
0
                    (all_labels, labels.detach().cpu().numpy()), axis=0)
                all_logits = np.concatenate(
                    (all_logits, logits.detach().cpu().numpy()), axis=0)

    acc = accuracy(all_logits, all_labels)
    f1 = f1_score(all_logits, all_labels)
    return np.array(loss).mean(), acc, f1


if __name__ == "__main__":

    # 创建存储目录
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    logger = getLogger(__name__, os.path.join(args.save_dir, 'log.txt'))

    if 'roberta' in args.model_type:
        MatchModel = RobertaMatchModel
        Tokenizer = RobertaTokenizer
    elif 'albert' in args.model_type:
        MatchModel = AlbertMatchModel
        Tokenizer = AlbertTokenizer
    elif 'bert' in args.model_type:
        MatchModel = BertMatchModel
        Tokenizer = BertTokenizer
    # elif 'ernie' in args.model_type:
    #     MatchModel =
    #     Tokenizer = AutoTokenizer

    if args.do_train:
コード例 #29
0
import sys
import json

import rospy
import tf

from kafka import KafkaProducer
import config
from utils.ros_utils import initROSNode
from utils.logger import getLogger

logger = getLogger('taskcenter_simulator')
logger.propagate = False

task_producer = KafkaProducer(
    bootstrap_servers=config.Kafka_Brokers,
    compression_type='gzip',
    value_serializer=lambda x: json.dumps(x).encode())


def startTask():
    # task_body = {
    #     "task_type": 0, # 0 for Task_Inspection
    #     "inspection_id": 6,
    #     "site_id": "bj01",
    #     "robots": [
    #         {
    #             "robot_id": "rosbot1", #"tb3_0",
    #             "model": "rosbot2_pro", #waffle_pi",
    #             "original_pos": "0.0#0.0#0.0", # x-y-angle
    #             "subtasks": [
コード例 #30
0
ファイル: xuetou.py プロジェクト: huazhicai/zheyi
from data_structure.singleton_structure_content import new_content
from huayan import HuayanData
from lis_new import LisData
from menzhen import MenZhenData
from new_his import NewHis
from yihui import YihuiSystem
from yingxiang import YingXiang
from zhuyuan import HisSystem
from utils.logger import getLogger
import warnings
import pymongo

warnings.filterwarnings("ignore")
import pymssql

logger = getLogger('xuetou')

# 数据存储
client = pymongo.MongoClient(MONGO_HOST)
db = client[MONGO_DB]
collection = db[COLLECTION]
# collection = db['origin']

connect = pymssql.connect(host=host, port=port, user=user, password=password, database=database)
cursor = connect.cursor()

UPDATE_SUCCESS_IDS = []

with open('config/jianyan_field.json', 'r') as f:
    jianyan_config = json.load(f)
コード例 #31
0
import threading
import time


from config import Inspection_Status_Codes, Enable_Influx, Pos_Value_Splitter, Robot_Model

from navigation.turtlebot_launch import Turtlebot_Launcher
from navigation.turltlebot_cruise import runRoute
from navigation.turtlebot_robot_status import setRobotWorking, setRobotIdel, isRobotWorking, isInspectionRunning, isInspectionRepeated
from utils.msg_utils import sendTaskStatusMsg, sendDiscoveryStopRecords
from utils.ros_utils import killNavProcess, checkMapFile
from monitor import InspectionMonitor

from utils.logger import getLogger
logger = getLogger('execInspection')
logger.propagate = False

inspection_monitor = InspectionMonitor.getInspectionMonitor()

def execInspection(data):
    try: 
        inspection_id = int(data['inspection_id'])
        inspection_type = int(data['task_type'])
        site_id = str(data['site_id'])
        robots = data['robots']
        robot_ids =  [robot['robot_id'] for robot in robots]
        trigger = None if 'trigger_alarms' not in data.keys() else data['trigger_alarms']
        '''
        #assign robot model by split robot_id
        '''
        for robot in robots: