Example #1
 def __init__(self):
     tello_center.Service.__init__(self)
     self.logger = sl4p.Sl4p(YoloService.name)
     self.backend = tello_center.service_proxy_by_class(
         tello_abs.TelloBackendService
     )  # type: tello_abs.TelloBackendService
     self.task_loop = None
Example #2
 def __init__(self, name):
     self.name = name
     self.logger = sl4p.Sl4p('fps_%s' % name)
     self.call_list = []
     self.max_recode_count = 120
     self.latest_print_time = None
     self.print_duration = 1
     self.fps = None
Example #3
 def __init__(self, config=None):
     Service.__init__(self)
     self.logger = sl4p.Sl4p('config_service')
     self.config = {
         ConfigService.CONFIG_DEBUG: True,
         ConfigService.CONFIG_SERVICE_BLACK_LIST: []
     }
     if config is not None:
         for key in config:
             self.config[key] = config[key]
Example #4
 def mm():
     logger = sl4p.Sl4p("__main__", "1")
     logger.info("start")
     logger.info("CUDA %s" % str(torch.cuda.is_available()))
     test_files = [0, 1, 2, 3, 4]
     for s in test_files:
         img = cv2.imread(
             os.path.join(detector.main_dir, 'data/samples/%s.jpg' % s))
         start = time.time()
         logger.info("start detect %s" % s)
         result_obj = DetectImage(detector, img).invoke_on(loop)
         # detector.draw_result(img, result_obj)
         end = time.time()
         logger.info("time: " + str(end - start) + "s")
         for r in result_obj:
             logger.info(str(r))
Example #5
    def __init__(self):
        tello_center.Service.__init__(self)
        self.logger = sl4p.Sl4p(MainControl.name)
        self.backend = tello_center.service_proxy_by_class(tello_abs.TelloBackendService)  # type: tello_abs.TelloBackendService
        self.yolo = tello_center.service_proxy_by_class(tello_yolo.YoloService)  # type: tello_yolo.YoloService
        self.judge = tello_center.service_proxy_by_class(tello_judge_client.JudgeClientService)  # type:tello_judge_client.JudgeClientService
        self.stage = None
        self.args = None

        self.stage_wait_for_start = Stage('wait_for_start', func_do=self.wait_for_start)
        self.stage_find_fire = Stage('find_fire', func_do=self.search_fire)
        self.found_fire = None
        self.search_min = True

        self.stage_go_to_step2_start_pos = Stage('step2', func_do=self.step2)

        self.stage_land = Stage('land', func_do=self.land)
        self.stage_idle = Stage('idle', func_do=self.idle)
Example #6
    def __init__(self,
                 threshold,
                 weights=None,
                 iou_threshold=0.3,
                 num_class=6,
                 network='efficientdet-d4',
                 size_image=(512, 512)):
        super(Detect, self).__init__()
        self.logger = sl4p.Sl4p('my_detect')
        if weights is None:
            main_dir = os.path.split(os.path.abspath(__file__))[0]
            weights = os.path.join(
                main_dir, 'weights/checkpoint_efficientdet-d4_89.pth')
        self.weights = weights
        self.size_image = size_image
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else 'cpu')
        self.transform = get_augumentation(phase='test')
        self.show_transform = get_augumentation(phase='show')
        if self.weights is not None:
            self.logger.info('Load pretrained Model')
            checkpoint = torch.load(self.weights,
                                    map_location=lambda storage, loc: storage)
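            # the checkpoint also stores the class count and backbone used at training time,
            # so they override the values passed to the constructor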
            num_class = checkpoint['num_class']
            network = checkpoint['network']

        self.model = EfficientDet(num_classes=num_class,
                                  network=network,
                                  is_training=False,
                                  threshold=threshold,
                                  iou_threshold=iou_threshold)

        if self.weights is not None:
            state_dict = checkpoint['state_dict']
            self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device)  # use the selected device instead of assuming CUDA is available
        self.model.eval()
Example #7
    def __init__(self,
                 local_ip,
                 local_port,
                 imperial=False,
                 command_timeout=.3,
                 tello_ip='192.168.10.1',
                 tello_port=8889):
        """
        Binds to the local IP/port and puts the Tello into command mode.

        :param local_ip (str): Local IP address to bind.
        :param local_port (int): Local port to bind.
        :param imperial (bool): If True, speed is MPH and distance is feet.
                             If False, speed is KPH and distance is meters.
        :param command_timeout (int|float): Number of seconds to wait for a response to a command.
        :param tello_ip (str): Tello IP.
        :param tello_port (int): Tello port.
        """
        self.logger = sl4p.Sl4p("tello_base", "1;33")
        self.do_print_info = True
        self.filter = None
        self.request_lock = threading.Lock()
        self.response_handler_lock = threading.Lock()
        self.response_handler = None

        self.abort_flag = False
        self.command_timeout = command_timeout
        self.imperial = imperial
        self.response = None
        self.frame = None  # numpy array BGR -- current camera output frame
        self.is_freeze = False  # freeze current camera output
        self.last_frame = None

        self.log = []
        self.MAX_TIME_OUT = 5.0

        self.socket = socket.socket(
            socket.AF_INET, socket.SOCK_DGRAM)  # socket for sending cmd

        self.socket_state = socket.socket(socket.AF_INET,
                                          socket.SOCK_DGRAM)  # state socket
        self.tello_ip = tello_ip
        self.tello_address = (tello_ip, tello_port)
        self.last_height = 0
        self.socket.bind((local_ip, local_port))

        # thread for receiving cmd ack
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True

        self.receive_thread.start()

        # to receive video -- send cmd: command, streamon
        self.socket.sendto(b'command', self.tello_address)
        self.logger.info('into command mode')
        self.socket.sendto(b'streamon', self.tello_address)
        self.logger.info('open video stream')

        # thread for receiving video
        self.receive_video_thread = threading.Thread(
            target=self._receive_video_thread)
        self.receive_video_thread.daemon = True
        self.receive_video_thread.start()

        # state receive
        self.results = None
        self.socket_state.bind((local_ip, 8890))
        self.receive_state_thread = threading.Thread(
            target=self._receive_state_thread)
        self.receive_state_thread.daemon = True
        self.receive_state_thread.start()

        self.stop = False
        self.latest_safe_state = None

        self.state_lock = threading.Lock()
        self.video_lock = threading.Lock()
        self.state = None
        self.image = None
        self.has_takeoff = False
Example #8
 def __init__(self):
     tello_center.Service.__init__(self)
     self.logger = sl4p.Sl4p('imshow')
     self.looper = None
Example #9
import random

import cv2
import matplotlib
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm

import sl4p
from . import torch_utils  # , google_utils

matplotlib.rc('font', **{'size': 11})

# Set printoptions
torch.set_printoptions(linewidth=1320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)

utils_logger = sl4p.Sl4p("utils", "1;36")

def floatn(x, n=3):  # format floats to n decimals
    return float(format(x, '.%gf' % n))


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)


def load_classes(path):
    # Loads *.names file at 'path'
    with open(path, 'r') as f:
        names = f.read().split('\n')
    return list(filter(None, names))  # drop empty entries (e.g. a trailing newline)
Example #10
from control import tello_abs, tello_data, tello_judge_client
from world.world import *
import sl4p
import numpy as np

go_logger = sl4p.Sl4p('__go__')


def clamp_abs(v, minv, maxv):
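    # Clamp |v| to [minv, maxv] and restore the sign of v; zero stays zero.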
    vv = max(min(abs(v), maxv), minv)
    return vv if v > 0 else (0 if v == 0 else -vv)


def find_most_possible_object(collect):
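    # Keep detections seen at least 6 times with max_conf >= 0.80 and object_conf >= 0.4,
    # then return the one with the highest object_conf (None if nothing qualifies).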
    poss = []
    for ss in collect:
        if collect[ss]['count'] < 6 or collect[ss]['max_conf'] < 0.80 or collect[ss]['object_conf'] < 0.4:
            continue
        else:
            poss.append(collect[ss])
    if len(poss) == 0:
        return None
    if len(poss) == 1:
        return poss[0]
    if len(poss) > 1:
        max_obj = None
        for s in poss:
            if max_obj is None or s['object_conf'] > max_obj['object_conf']:
                max_obj = s
        return max_obj
Example #11
# coding=utf-8
import time
import threading
from utils import fps
import sl4p
import locks

_services = {}  # type: dict[Service]
_start_order = []

_logger = sl4p.Sl4p("service_center")
_logger.enable = True


class Service:
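    # Base class for everything managed by the service center; run_flag is the
    # cooperative stop flag exposed through flag().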
    def __init__(self):
        self.started = False
        self.run_flag = True

    def flag(self):
        return self.run_flag

    def available(self):
        return True

    def on_register(self):
        pass

    def call_start(self):
        self.start()
        self.started = True
Example #12
import os
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
import sl4p

util_logger = sl4p.Sl4p('util')

########################################################################
############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ###############
########################################################################

# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
    'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'num_classes',
    'width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth',
    'drop_connect_rate', 'image_size'
])

# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'stride', 'se_ratio'
])

# Change namedtuple defaults so every unspecified field defaults to None
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
Example #13
 def __init__(self):
     JudgeServerInterface.__init__(self)
     self.logger = sl4p.Sl4p('judge_http')
     self.base = 'http://127.0.0.1:5000'
Example #14
 def __init__(self):
     JudgeServerInterface.__init__(self)
     self.logger = sl4p.Sl4p('judge_local')
     self.targets = None
     self.results = None
     self.next_receive_idx = 1
Example #15
    def put_chest_info(self, chest_idx, obj_name):
        """
        Helps handle the target order.
        @param chest_idx:
        @param obj_name:
        @return: CODE_ERROR_TARGET = 0, CODE_CONTINUE = 1, CODE_TASK_DONE = 2
        """
        if self.targets is None:
            self.targets = self.server.get_targets()
            for i in range(len(self.targets)):
                self.targets_idx[self.targets[i]] = i + 1

        self.chest_info[chest_idx].obj_name = obj_name
        return self.update_chest_info()


if __name__ == '__main__':
    logger = sl4p.Sl4p('__main__')
    tello_center.register_service(tello_center.ConfigService(config={}))
    tello_center.register_service(JudgeClientService())
    tello_center.register_service(JudgeServerLocal())
    tello_center.start_all_service()

    client = tello_center.service_proxy_by_class(
        JudgeClientService)  # type: JudgeClientService

    client.server.takeoff()
    client.server.seen_fire()
    logger.info(code2name[client.put_chest_info(1, NAME_BABY)])
    logger.info(code2name[client.put_chest_info(3, NAME_CAT)])
    logger.info(code2name[client.put_chest_info(2, NAME_GAS_TANK)])
    logger.info(code2name[client.put_chest_info(4, NAME_FILES)])
    logger.info(code2name[client.put_chest_info(5, NAME_PAINTING)])
Example #16
import torch
import torch.nn as nn
import sl4p

torch_logger = sl4p.Sl4p("torch", "1;33")


def init_seeds(seed=0):
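    # Clear the CUDA cache and seed the CPU RNG plus every GPU RNG so runs are repeatable.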
    torch.cuda.empty_cache()
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # torch.backends.cudnn.deterministic = True  # https://pytorch.org/docs/stable/notes/randomness.html


def select_device(force_cpu=False, apex=False):
    # apex if mixed precision training https://github.com/NVIDIA/apex
    cuda = False if force_cpu else torch.cuda.is_available()
    device = torch.device('cuda:0' if cuda else 'cpu')

    if not cuda:
        torch_logger.info('Using CPU')
    if cuda:
        torch.backends.cudnn.benchmark = True  # set False for reproducible results
        c = 1024**2  # bytes to MB
        ng = torch.cuda.device_count()
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        cuda_str = 'Using CUDA ' + ('Apex ' if apex else '')
        for i in range(0, ng):
            if i == 1:
                # torch.cuda.set_device(0)  # OPTIONAL: Set GPU ID
                cuda_str = ' ' * len(cuda_str)  # align log lines after the first device
            torch_logger.info("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
                              (cuda_str, i, x[i].name, x[i].total_memory / c))

    return device