Example #1
def run_thread(node_id):
    """
    Run threaded node
    """
    print('Start Threaded process')
    try:
        node = storage.get(CustomNode, node_id)
        logger = Logger(node.user_id)
        test_file = {
            'status': 'started',
            'node_id': node_id,
            'messages': ['Starting flow']
        }
        with open('./api/running/{}.test'.format(node_id), 'w') as test:
            test.write(json.dumps(test_file))
        resp = node.run_node_task(({}, {}), logger, node_id)
        logger_content = str(logger)
        with open('./api/running/{}.test'.format(node_id), 'r') as test:
            test_file = json.loads(test.read())
        test_file['logger'] = dict(logger.json())
        test_file['status'] = 'completed'
        test_file['node_id'] = node_id
        with open('./api/running/{}.test'.format(node_id), 'w') as test:
            test.write(json.dumps(test_file))
        logger.reset()
        print('Finishing thread')
        # thre = threading.currentThread()
        storage.close()
        # threading.Event().set()
        # Thread.join(thre, None)
    except Exception as e:
        print('Thread Fail:\n\t', e)
        traceback.print_exc()
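A minimal launcher sketch (not part of the original example), assuming run_thread above is importable from the same module and that node_id refers to an existing CustomNode; it simply moves the work onto a background thread with the standard library.

import threading

def start_node_thread(node_id):
    # Run the node task off the calling thread so the caller can return immediately.
    worker = threading.Thread(target=run_thread, args=(node_id,), daemon=True)
    worker.start()
    return worker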
Example #2
def save_training_images_paths(path_pre_processed_positive,
                               path_pre_processed_negative):
    '''
    Save detector_training image paths in txt files.
    '''
    positive_training_images = Files(path_pre_processed_positive)
    negative_training_images = Files(path_pre_processed_negative)

    positive_path = "positive.txt"
    logger = Logger()
    logger.log(Logger.INFO, "Saving positive paths in: " + positive_path)

    positive_file = open(positive_path, "w")
    for file_path in positive_training_images.paths:
        base_name = ntpath.basename(file_path)
        save_path = path_pre_processed_positive + base_name
        positive_file.write(save_path + "\n")
    positive_file.close()

    negative_path = "negative.txt"
    logger.log(Logger.INFO, "Saving negative paths in: " + negative_path)

    negative_file = open(negative_path, "w")
    for file_path in negative_training_images.paths:
        base_name = ntpath.basename(file_path)
        save_path = path_pre_processed_negative + base_name
        negative_file.write(save_path + "\n")
    negative_file.close()
Example #3
def training(argv):
    '''
    Train license plate detector.
    '''
    # Parses args.
    arg_parser = ArgumentParser(description='Load and plot image.')
    arg_parser.add_argument('-c',
                            '--config',
                            dest='config_file',
                            default='config.ini',
                            help='Configuration file')
    args = vars(arg_parser.parse_args())

    # Parses configuration file.
    config_parser = SafeConfigParser()
    config_parser.read(args['config_file'])
    path_pre_processed_negative = config_parser.get(
        'data', 'path_pre_processed_negative')
    total_negative_files = len(Files(path_pre_processed_negative).paths)
    negative_file = "negative.txt"
    positive_file = "positive.vec"
    reserved_memory = str(config_parser.get('training', 'reserved_memory'))
    total_positive_files = config_parser.get('training',
                                             'total_positive_files')
    training_width = config_parser.get('training', 'training_width')
    training_height = config_parser.get('training', 'training_height')
    detector_command = "opencv_traincascade -data classifier -vec " + positive_file + " -bg " + negative_file + " -numStages 20 -minHitRate 0.999 -maxFalseAlarmRate 0.5 -numPos " + str(
        total_positive_files
    ) + " -numNeg " + str(total_negative_files) + " -w " + str(
        training_width
    ) + " -h " + str(
        training_height
    ) + " -mode ALL -precalcValBufSize " + reserved_memory + " -precalcIdxBufSize " + reserved_memory

    # Create script to train classifier.
    logger = Logger()
    logger.log(Logger.INFO, "Creating script training.sh")
    training_path = "training.sh"
    training_file = open(training_path, "w")
    training_file.write("#!/bin/bash\n")
    training_file.write(detector_command)
    training_file.close()

    # Training classifier.
    logger.log(Logger.INFO, "Start trainning.")
    call(["sh", training_path])
class LiteralConstants:
    class FileType(enum.Enum):
        REG = 1
        JSON = 2
        BYTES = 3

    class ChatType:
        PRIVATE: str = "private"
        GROUP: str = "group"

    class BotCommands:
        HELP: str = "help"
        START: str = "start"
        ADD_AUDIO: str = "addaudio"
        LST_AUDIO: str = "listaudio"
        RM_AUDIO: str = "rmaudio"
        RM_ALL_AUDIOS: str = "rmallaudios"

    class BotAnswers:
        SEND_AUDIO: str = "Send audio or voice note."
        MAX_OWN_AUDIOS: str = "Sorry, you reached the maximum number of stored audios (50). Try removing some of them with the /rmaudio command."
        PROVIDE_DESC: str = "Saved!\n\nNow provide a short description for the audio. 30 characters allowed."
        NOT_AUDIO: str = "No audio file was detected. Are you sure you uploaded the correct file? Try again with the /addaudio command."
        WRONG_DESC: str = "Wrong input. Write a short description to save the audio. 30 characters maximum."
        USED_DESC: str = "Description is already in use. Please, write another one."
        SAVED: str = "Saved audio with description: \"%s\""
        LST_NONE_AUDIO: str = "Sorry, you don't have any uploaded audio... Try to upload one with /addaudio command."
        RM_AUDIO: str = "Send the description of the audio you want to remove."
        RM_ALL_AUDIO: str = "Are you completely sure you want to delete all your audios? Answer 'CONFIRM' in uppercase to verify this action."
        RM_ALL_NOT_CONFIRM: str = "You should have answered 'CONFIRM' to validate the deletion. Canceling action."
        RM_DESC_NOT_TEXT: str = "Wrong input. Send the description of the audio you want to remove. Try again /rmaudio."
        RM_USED_DESC: str = "No audio with the provided description. Please, send the correct description. Try again /rmaudio."
        DELETED_AUDIO: str = "The file was deleted from your audios."
        DELETED_ALL_AUDIO: str = "All your audios were deleted successfully."

    class FilePath:
        TOKEN: str = "data/token.txt"
        DB: str = "data/db.json"
        TTS: str = "data/magic.txt"
        HELP_MSG: str = "data/help.txt"
        STA_LOG: str = "data/status.log"
        MSG_LOG: str = "data/messages.log"
        QRY_LOG: str = "data/queries.log"
        AUDIOS: str = "audios/"

    class ExceptionMessages:
        DB_CONNECTED: str = "DB | Connected to MySQL database server, version "
        DB_UNCONNECTED: str = "DB | Error while trying to connect to MySQL database server\nError: "
        DB_DISCONNECTED: str = "DB | Disconnected from MySQL database server"
        DB_READ: str = "DB | Unable to fetch data from database\nSQL query: "
        DB_WRITE: str = "DB | Unable to write data in database\nSQL query: "
        FILE_CANT_OPEN: str = "File | Unable to open requested file\n"
        FILE_CANT_WRITE: str = "File | Unable to write provided data in this file\n"
        AUDIO_ERROR: str = "AUDIO | Unable to open file with mimetype %s\n"
        UNEXPECTED_ERROR: str = "Error | An unexpected error has occurred\n"

    STA_LOG: Logger = Logger("Status log", FilePath.STA_LOG)
    MSG_LOG: Logger = Logger("Message logger", FilePath.MSG_LOG)
    QRY_LOG: Logger = Logger("Query logger", FilePath.QRY_LOG)

    MAX_QUERIES: int = 100000

    LANGUAGES: Dict[str, str] = {
        "AR العربية": "Ar",
        "Deutsch DE": "De-de",
        "English UK": "En-uk",
        "English US": "En-us",
        "Español ES": "Es-es",
        "Español MX": "Es-mx",
        "Français FR": "Fr-fr",
        "Italiano IT": "It-it",
        "Português PT": "Pt-pt",
        "ελληνικά GR": "El-gr",
        "русский RU": "Ru-ru",
        "Türk TR": "Tr-tr",
        "中国 ZH": "Zh-cn",
        "日本の JA": "Ja",
        "Polski PL": "Pl"
    }
    __sortedLAN: List[Tuple[str, str]] = sorted(LANGUAGES.items(),
                                                key=itemgetter(0))
    SORTED_LANGUAGES: OrderedDict = OrderedDict(__sortedLAN)

    PROBLEMATIC_CHARS: Dict[str, str] = {
        "\n": " ",
        "’": "'",
        "‘": "'",
        "\"": "'",
        "“": "'",
        "”": "'",
        "…": "...",
        "<": "",
        ">": "",
        "#": "",
        "%": "",
        "{": "",
        "}": "",
        "|": "",
        "^": "",
        "~": "",
        "[": "",
        "]": "",
        "`": "",
        ";": "",
        "/": ""
    }  # ? : @ = &

    CONTENT_TYPE: List[str] = ['audio', 'voice', 'video']
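A hypothetical helper (not part of the constants class above) showing how PROBLEMATIC_CHARS might be applied before a message is forwarded, e.g. to a TTS query; the function name is an assumption.

def sanitize_tts_text(text: str) -> str:
    # Replace every character listed in PROBLEMATIC_CHARS with its substitute.
    table = str.maketrans(LiteralConstants.PROBLEMATIC_CHARS)
    return text.translate(table)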
class ConfigData:
    '''
    Saves and reads license plate configuration data files.
    '''

    _logger = Logger()

    def __init__(self, quadrilaterals, image_path=""):
        self.quadrilaterals = quadrilaterals

        if image_path != "":
            self.get_file_path_by_image_path(image_path)

    def set_config_path(self, image_path):
        '''
        Set config file path using image_path.
        '''
        self.get_file_path_by_image_path(image_path)

    def get_file_path_by_image_path(self, image_path):
        '''
        Get file path using image path.
        '''

        self.file_name = ntpath.basename(image_path)
        self.file_name = os.path.splitext(self.file_name)[0]
        self.folder_path = self.save_path = os.path.dirname(
            os.path.abspath(image_path))
        self.save_path = os.path.join(self.save_path, self.file_name + ".txt")
        return self.save_path

    def set_license_plates(self, license_plates_text):
        '''
        Set license plates from comma-separated text.
        '''
        self.license_plates = [
            x.strip() for x in license_plates_text.split(',')
        ]

        # Drop empty entries without mutating the list while iterating over it.
        self.license_plates = [
            license_plate for license_plate in self.license_plates
            if license_plate
        ]

    def save_file(self, quadrilaterals):
        '''
        Save config file.
        '''
        file_data = ""

        total = len(quadrilaterals.data)
        if total == 0:
            file_data = "None"
        else:
            for index_quadrilateral in range(total):
                quadrilateral = quadrilaterals.data[index_quadrilateral]

                for point in quadrilateral.points:
                    file_data += str(int(point.x)) + "," + str(int(
                        point.y)) + ","

                file_data += self.license_plates[index_quadrilateral]

                if index_quadrilateral != total - 1:
                    file_data += "\n"

        config_file = open(self.save_path, "w")
        config_file.write(file_data)
        config_file.close()

    def read_data(self):
        '''
        Read config data from file.
        '''
        self.license_plates = []
        self.license_plates_text = ""
        self.quadrilaterals.clean()
        self._logger.log(Logger.INFO,
                         "Reading configuration data from " + self.save_path)

        if os.path.isfile(self.save_path):
            f = open(self.save_path, "r")
            read_data = []

            for line in f:
                read_data.append(line.rstrip('\n'))

            if not (len(read_data) == 1 and read_data[0] == "None"
                    or len(read_data) == 0):
                for index_line in range(len(read_data)):
                    line = read_data[index_line]

                    data_list = [x.strip() for x in line.split(',')]

                    total = len(data_list)
                    if total == 9:
                        quadrilateral = self.quadrilaterals.add_quadrilateral()

                        for data_index in range(4):
                            quadrilateral.add_point(
                                Point(int(data_list[data_index * 2]),
                                      int(data_list[data_index * 2 + 1])))

                        self.license_plates.append(data_list[8])

                        if index_line == 0:
                            self.license_plates_text = data_list[8]
                        else:
                            self.license_plates_text += "," + data_list[8]
            f.close()
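A minimal usage sketch, assuming the Quadrilaterals helper used by ConfigData above and an annotated image at the hypothetical path below; read_data() then loads the quadrilaterals and plate strings from the matching .txt file.

quadrilaterals = Quadrilaterals()
config_data = ConfigData(quadrilaterals, image_path="data/positive/car_001.jpg")
config_data.read_data()
print(config_data.license_plates)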
Example #6
# coding=utf-8
# --author='fangfang'

from models.mobileFunction import FunctionLibrary
from models.screenshot import Screenshot
from models.logger import Logger

logger = Logger(logger="processed").getlog()


class Process:
    def __init__(self, driver, case_modle, case_id):
        self.driver = driver
        self.functionlibrary = FunctionLibrary(self.driver)
        self.screenshot = Screenshot(self.driver, case_modle, case_id)

    def login(self, username, password):
        # self.functionlibrary.ClickByName("我的")
        self.functionlibrary.waitNAME("登录")
        self.functionlibrary.SendKeysByID("com.ygjr.ycd.debug:id/login_et_name", username)
        self.functionlibrary.swipeOnScreenN(0.5, 0.5, 0.5, 0.5, 1)
        self.functionlibrary.SendKeysByID("com.ygjr.ycd.debug:id/login_et_pwd", pasword)
        self.functionlibrary.ClickByName("登录")
        self.functionlibrary.ClickByName("取消")
        self.functionlibrary.waitNAME("首页")
        self.screenshot.takeTakesScreenshot('登录成功截图')
        logger.info("process login execute succeed!")

    def logout(self):
        self.functionlibrary.ClickByName("我的")
        self.functionlibrary.waitNAME("设置中心")
    # Parses configuration file.
    config_parser = SafeConfigParser()
    config_parser.read(args['config_file'])
    path_pre_processed_positive = config_parser.get(
        'data', 'path_pre_processed_positive')
    path_pre_processed_negative = config_parser.get(
        'data', 'path_pre_processed_negative')
    path_svm_hog_detector = config_parser.get('data', 'path_svm_hog_detector')
    path_svm_binary_detector = config_parser.get('data',
                                                 'path_svm_binary_detector')
    image_width = int(config_parser.get('training', 'image_width'))
    image_height = int(config_parser.get('training', 'image_height'))

    # Load images for training.
    logger = Logger()
    logger.log(Logger.INFO, "Loading images to train SVM classifier.")
    positive_images = Files(path_pre_processed_positive)
    negative_images = Files(path_pre_processed_negative)
    samples = []
    responses = []

    for file_path in positive_images.paths:
        # Load image.
        image = Image(file_path)

        # Convert and equalize image.
        image.convert_to_gray()

        # Add image for training.
        samples.append(image)
Example #8
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium import webdriver
import os.path
from configparser import ConfigParser
from models.logger import Logger
from models.path import get_obj_path

dir_path = get_obj_path()
config = ConfigParser()
# file_path = os.path.dirname(os.getcwd()) + '/config/config.ini'
file_path = dir_path + '\\config\\config.ini'
config.read(file_path)
# Timeout for locating elements
# print(file_path)
timeout = int(config.get("waiTime", "time"))
# create a logger instance
logger = Logger(logger="FunctionLibrary").getlog()


class ScriptError(Exception):
    '''
    Exception class that wraps and returns an error message.
    '''
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


def exception(func):
    """
Example #9
# -*- coding: utf-8 -*-
# @Time    : 2018/11/14 19:21
# @Author  : fangfang
# @File    : asset_items.py

from models.unittest_setup import Setup
from models.logger import Logger

logger = Logger(logger="template").getlog()


class AssetItem:
    def __init__(self, trade, caseid):
        self.trade = trade
        self.caseid = caseid

    def add_asset(self):
        # ------------- Instantiate helper classes ----------
        case = Setup()
        case.asset_web_setup(self.trade, self.caseid)
        driver = case.driver
        functionlibrary = case.functionlibrary
        datatools = case.datatools
        screenshot = case.screenshot
        datas = datatools.getExcelDateRowsByValue(self.trade, self.caseid)

        # ----------- Business flow logic -----------
        screenshot.takeTakesScreenshot("打开登录页面成功")
        functionlibrary.type(
            "xpath", "//*[@id='app']/div/div/form/div[1]/div/div/input",
            int(datas[6]))
Example #10
'''
created on 2018-3-6
@author: Gigi
project: base class

'''
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import sys
sys.path.append("..")
import models.logger
from models.logger import Logger
import time

logger = 'BasePage'
mylogger = Logger(logger).getlog()


class BasePage(object):
    """docstring for BasePage"""

    #
    def __init__(self, selenium_webdriver, base_url, pagetitle):
        self.driver = selenium_webdriver
        self.base_url = base_url
        self.pagetitle = pagetitle
        self.timeout = 30

    def onpage(self, pagetitle):
        return pagetitle in self.driver.title
Example #11
# coding=utf-8
# --author='fangfang'

from models.unittest_setup import Setup
import unittest
from models.logger import Logger

logger = Logger(logger="test_case").getlog()


class LendRecord:
    def __init__(self, trade, caseid):
        """
        Define the case's trade and caseid.
        :return:
        """
        self.trade = trade
        self.caseid = caseid

    def lend_record(self):
        # ------- Instantiate base classes -----
        case = Setup()
        case.ycd360_mobile_setup(self.trade, self.caseid)
        datatools = case.datatools
        driver = case.driver
        functionlibrary = case.functionlibrary
        process = case.process
        screenshot = case.screenshot
        logger.info('------' + self.caseid + '开始执行------')

        # ----- Get test data ------
Example #12
  
# Parses configuration file.
config_parser = SafeConfigParser()
config_parser.read(args['config_file'])
path_original_positive = config_parser.get('data', 'path_original_positive')
character_original_width = int(config_parser.get('training', 'character_original_width'))
character_original_height = int(config_parser.get('training', 'character_original_height'))
path_letter_images = config_parser.get('data', 'path_letter_images')
path_letter_labels = config_parser.get('data', 'path_letter_labels')
path_number_images = config_parser.get('data', 'path_number_images')
path_number_labels = config_parser.get('data', 'path_number_labels')
image_width = 400
image_height = 200

# Load and pre process images.
logger = Logger()
logger.log(Logger.INFO, "Loading images.")
positive_images = Files(path_original_positive)
character_validator = CharacterValidator()
number_labels = []
number_images = []
letter_labels = []
letter_images = []

for index in range(len(positive_images.paths)):
    file_path = positive_images.paths[index]

    # Load image and configuration data.
    image_original = Image(file_path)
    config_data = ConfigData(Quadrilaterals(), file_path)
    config_data.read_data()
Example #13
# coding=utf-8
# --author='fangfang'

import pymysql
import readConfig
from models.logger import Logger

log = Logger(logger="assetDataBase").getlog()


class AssetDB:
    def __init__(self):
        rc = readConfig.ReadConfig()
        self.host = rc.get_value("assetSqlInfo", "host")
        self.username = rc.get_value("assetSqlInfo", "user")
        self.password = rc.get_value("assetSqlInfo", "passwd")
        self.port = int(rc.get_value("assetSqlInfo", "port"))
        self.database = rc.get_value("assetSqlInfo", "db")
        self.db = None
        self.cursor = None

    def connectDB(self):
        """
        connect to database
        :return:
        """
        try:
            # connect to DB
            self.db = pymysql.connect(host=self.host,
                                      port=self.port,
                                      user=self.username,
Example #14
from models.consumer import Consumer
from models.logger import Logger
from util import generate_mongo_uri
import os
from dotenv import load_dotenv
import pika
import ssl
load_dotenv()
credentials = pika.PlainCredentials(os.getenv("DB_USERNAME"), os.getenv("PASSWORD"))
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
config = {'host': 'b-6cdeb3fd-c4a0-432f-b393-287562fabdcb.mq.us-east-2.amazonaws.com', 'port': 5671, 'exchange': 'system-integration', 'queue_name': 'logs'}
mongo_uri = generate_mongo_uri(os.getenv("DB_NAME"), os.getenv("DB_USERNAME"), os.getenv("PASSWORD"))
logger = Logger(mongo_uri, os.getenv("DB_NAME"))

consumer = Consumer(config=config, logger=logger, credentials=credentials, context=context)
if __name__ == '__main__':
    print(os.getenv("DB_USERNAME"))
    print(mongo_uri)

    consumer.setup_fanout()

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
Example #15
# coding=utf-8
# --author='fangfang'

from models.httpTool import Http
from models.logger import Logger

log = Logger(logger="asset_login").getlog()


def switch_position(session, position):
    # Map the position name to its position ID
    position_data = {'营业部': '252', '运营部': '253', '风控部': '254', '服务部': '255', '财务部': '256', '发标部': '257', '管理员': '258'}
    positionId = position_data[position]
    # Instantiate the HTTP request helper
    http = Http()
    headers = {"Connection": "keep-alive", "Content-Type": "application/json;charset=UTF-8",
               "Cookie": "SESSION=" + session}
    url = "/api/asset/accounts/switch/position"
    data = {"positionId": positionId}
    http.set_url(url)
    http.set_headers(headers)
    http.set_data(data)
    r = http.putWithJson()
    status = r.status_code
    rt = r.text
    r_json = r.json()
    log.info("访问switch_position接口response:%s" % rt)
    return r_json
character_original_width = int(config_parser.get('training', 'character_original_width'))
character_original_height = int(config_parser.get('training', 'character_original_height'))
path_letter_images = config_parser.get('data', 'path_letter_images')
path_letter_labels = config_parser.get('data', 'path_letter_labels')
path_letter_classifier = config_parser.get('data', 'path_letter_classifier')
path_number_images = config_parser.get('data', 'path_number_images')
path_number_labels = config_parser.get('data', 'path_number_labels')
path_number_classifier = config_parser.get('data', 'path_number_classifier')
path_number_knn_labels_classifier = config_parser.get('data', 'path_number_knn_labels_classifier')
path_number_knn_images_classifier = config_parser.get('data', 'path_number_knn_images_classifier')
path_letter_knn_labels_classifier = config_parser.get('data', 'path_letter_knn_labels_classifier')
path_letter_knn_images_classifier = config_parser.get('data', 'path_letter_knn_images_classifier')
character_classifier_type = config_parser.get('data', 'character_classifier_type')

# Load images and labels.
logger = Logger()
logger.log(Logger.INFO, "Loading images to train classifiers.")

number_images = np.loadtxt(path_number_images, np.uint8)
number_labels = np.loadtxt(path_number_labels, np.float32)
number_labels = number_labels.reshape((number_labels.size, 1))
converted_images = []
labels = []

for index in range(len(number_images)):
    image = number_images[index]
    reshaped = Image(image=image.reshape((character_original_height, character_original_width)))
    reshaped.binarize(adaptative=True)
    mean_value = np.mean(reshaped.data)

    if mean_value < 220:
Example #17
# -*- coding:utf-8 -*-

from configparser import ConfigParser
from selenium import webdriver
from models.logger import Logger
from models.path import get_obj_path

logger = Logger(logger="BrowserEngine").getlog()
dir_path = get_obj_path()
chrome_driver_path = dir_path + '/tools/chromedriver.exe'
ie_driver_path = dir_path + '/tools/IEDriverServer.exe'
config = ConfigParser()
# file_path = os.path.dirname(os.getcwd()) + '/config/config.ini'
file_path = dir_path + '/config/config.ini'
config.read(file_path)


class BrowserEngine(object):
    def __init__(self):
        self.browser = config.get("browserType", "browserName")
        self.url = config.get("webServer", "URL")
        # read the browser type from config.ini file, return the driver

    def open_browser(self):

        driver = "浏览器参数错误"
        if self.browser == "Firefox":
            driver = webdriver.Firefox()
            logger.info("Starting firefox browser.")
        elif self.browser == "Chrome":
            driver = webdriver.Chrome(chrome_driver_path)
Example #18
# coding=utf-8
# --author='fangfang'

import os, sys
from models.mobileDriver import Mobile
from models.path import get_obj_path
from models.logger import Logger

sep = os.path.sep  # path separator for the current OS
logger = Logger(logger="Screenshot").getlog()


class Screenshot():
    def __init__(self, driver, case_modle, case_id):
        # --- data used for screenshot comparison
        self.screenShotNum = 0  # screenshot order within the case
        self.transName = ""
        self.case_id = case_id
        # self.case_id = os.path.basename(sys.argv[0]).split(".")[0]
        # self.case_modle = os.path.dirname(get_obj_path())
        self.case_modle = case_modle
        self.image_path = sep.join([
            get_obj_path(), 'result\\image\\' + self.case_modle, self.case_id
        ])
        self.num = 0
        self.driver = driver
        # --- end of data used for screenshot comparison

    def takeTakesScreenshot(self, fileName, funcName=''):
        '''
        Take a screenshot.
class Image:
    '''
    Reads and processes images.
    '''
    
    _logger = Logger()
    data = None  # The image data.
    
    def __init__(self, file_path=None, image=None):
        # Create image from matrix.
        if image is not None:
            self._set_image_data(image)
        
        # Load image if user specified file path.
        elif file_path is not None:
            # Check if file exist.
            if os.path.isfile(file_path):
                # File exists.
                file_path = os.path.abspath(file_path)
            
                # Load image                
                self._logger.log(Logger.INFO, "Loading image " + file_path)
                image_data = cv2.imread(file_path)
                self.file_path = file_path
                
                # If image is in bgr, convert it to rgb.
                if len(image_data.shape) == 3:
                    # Image is in bgr.
                    
                    # Convert image to rgb.
                    image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
                
                self._set_image_data(image_data)
            else:
                # File does not exist.
                self._logger.log(Logger.ERROR, "File '" + file_path + "' does not exist.")
    
    def _set_image_data(self, data):
        '''
        Set image data.
        '''
        # Check if image is in rgb or gray scale
        if len(data.shape) == 3:
            # Image is in rgb.
            self.height, self.width, self.channels = data.shape
        else:
            # Image is in gray scale.
            self.height, self.width = data.shape
            self.channels = 1
        
        self.data = data
    
    def filter_median(self, image=None, size=5):
        '''
        Filter image with median.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to filter image.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before filtering.")
                self.convert_to_gray(self.data)
                
            # Filter image.
            self.data = cv2.medianBlur(self.data, size)
            
        return self.data
    
    def filter_gaussian_blur(self, image=None, size=5):
        '''
        Filter image with gaussian blur.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to filter image.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before filtering.")
                self.convert_to_gray(self.data)
                
            # Filter image.
            self.data = cv2.GaussianBlur(self.data, (size, size), 0)            
            
        return self.data
    
    def smart_equalize(self, image=None):
        '''
        Equalize image if it's too dark or too light.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to equalize image.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before equalization.")
                self.convert_to_gray(self.data)
                
            # Equalize image if it is too light or too dark.
            mean = np.mean(self.data)
    
            if mean < 70 or mean > 100:
                if mean > 200:
                    self.contrast()
                
                if mean < 20:
                    self.data = self.data.astype(int)
                    self.data = self.data * 2
                    self.data = np.uint8(self.data)
                    
                self.equalize()
                
        return self.data
    
    def equalize(self, image=None):
        '''
        Equalize image.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to equalize image.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before equalization.")
                self.convert_to_gray(self.data)
                
            # Equalize image.
            self._logger.log(Logger.INFO, "Equalizing image.")
            self.data = cv2.equalizeHist(self.data)
            
        return self.data
    
    def convert_to_gray(self, image=None):
        '''
        Convert rgb to gray.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to convert image.")
        elif image is not None:
            self._set_image_data(image)
        
        if self.data is not None:
            # Convert image only if it is in rgb.
            if len(self.data.shape) == 3:
                self._logger.log(Logger.INFO, "Converting image to gray scale.")
                self.data = cv2.cvtColor(self.data, cv2.COLOR_RGB2GRAY)
            else:
                self._logger.log(Logger.INFO, "Image is already in gray scale.")
                
            self.channels = 1            
        
        return self.data
    
    def plot(self, image=None):
        '''
        Plot image.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to plot image.")
        elif image is not None:
            self._set_image_data(image)
            
        # Display image if we have data for it.
        if self.data is not None:
            # Convert image to BGR, if it is in rgb.
            if self.channels == 3:
                image = cv2.cvtColor(self.data, cv2.COLOR_RGB2BGR)
            else:
                image = self.data
            
            # Plot image.    
            self._logger.log(Logger.INFO, "Plotting image.")
            cv2.imshow("Image", image)
            return cv2.waitKey()
    
    def resize(self, width, height, image=None):
        '''
        Resize image.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to resize image.")
        elif image is not None:
            self._set_image_data(image)
            
        self._logger.log(Logger.INFO, "Resizing image to: width = " + str(width) + " height = " + str(height))
        
        try:
            if width > 0 and height > 0:
                resized = cv2.resize(self.data, (width, height), interpolation = cv2.INTER_AREA)
            elif self.width > 0 and self.height > 0:
                resized = np.zeros((self.width, self.height), dtype=np.uint8)
            else:
                resized = np.zeros((400, 200), dtype=np.uint8)
        except:
            if width > 0 and height > 0:
                resized = np.zeros((width, height), dtype=np.uint8)
            elif self.width > 0 and self.height > 0:
                resized = np.zeros((self.width, self.height), dtype=np.uint8)
            else:
                resized = np.zeros((400, 200), dtype=np.uint8)
        
        return Image(image = resized)
    
    def crop(self, origin, end, image=None):
        '''
        Crop image.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to crop image.")
        elif image is not None:
            self._set_image_data(image)
        
        if self.data is not None:
            # Correct parameters.
            if origin.x >= self.width:
                origin.x = self.width - 1
            elif origin.x < 0:
                origin.x = 0
            
            if end.x >= self.width:
                end.x = self.width - 1
            elif end.x < 0:
                end.x = 0
            
            if origin.y >= self.height:
                origin.y = self.height - 1
            elif origin.y < 0:
                origin.y = 0
            
            if end.y >= self.height:
                end.y = self.height - 1
            elif end.y < 0:
                end.y = 0
            
            if origin.x > end.x:
                change = end.x
                end.x = origin.x
                origin.x = change
                       
            if origin.y > end.y:
                change = end.y
                end.y = origin.y
                origin.y = change
                
            self._logger.log(Logger.INFO, "Cropping image. Origin: (%d, %d) End: (%d, %d)" \
                % (origin.x, origin.y, end.x, end.y))
            return Image(image = self.data[origin.y:end.y, origin.x:end.x])
    
        
    def invert_binary(self, image=None):
        '''
        Invert binary image.
        '''
        
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to invert image.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to invert it.")
                self.convert_to_gray(self.data)
                self.binary(self.data)
                
            # Invert binary image.
            self._logger.log(Logger.INFO, "Invert binary image.")
            self.data = cv2.bitwise_not(self.data)
        
        return self.data
      
    def compute_morphologic(self, image=None, morphologic=1):
        '''
        Apply morphologic operation in image.
        '''
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to apply morphologic operation.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before applying morphologic operation.")
                self.convert_to_gray(self.data)
                self.binary(self.data)
                
            # Apply morphologic operation.
            self._logger.log(Logger.INFO, "Apply morphologic operation in image.")
            
            if morphologic == 1:
                self.data = cv2.bitwise_not(self.data)
                element_3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
                element_5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
                
                # Erode and dilate.
                temp = self.data
                temp = cv2.erode(temp, element_3)
                temp = cv2.dilate(temp, element_3)
                
                # Dilate and erode.
                temp = cv2.erode(temp, element_3)
                self.data = cv2.dilate(temp, element_5)
                
                self.data = cv2.bitwise_not(self.data)
            else:
                self.data = cv2.bitwise_not(self.data)
                element_3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
                element_5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
                
                # Erode and dilate.
                temp = self.data
                temp = cv2.erode(temp, element_5)
                temp = cv2.dilate(temp, element_5)
                
                # Dilate and erode.
                temp = cv2.erode(temp, element_5)
                self.data = cv2.dilate(temp, element_5)
                
                self.data = cv2.bitwise_not(self.data)
                
        return self.data
    
    def compute_rectangles_for_characters(self, image=None):
        '''
        Compute rectangles in image.
        '''
        filtered_contours = []

        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to compute rectangles.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before computing rectangles.")
                self.convert_to_gray(self.data)
            
            thresh = cv2.Canny(self.data, 1 ,100)
            contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            filtered_contours = []
            
            for contour in contours:
                x, y, w, h = cv2.boundingRect(contour)
                ratio = w * 1.0 / h
                
                if ratio > 0.15 and ratio < 1.1 and w*h > 400 and w*h < 7000 and w > 4 and h > 20:
                    filtered_contours.append(Rect([x, y, w, h]))
                
                if w*h < 400:
                    cv2.fillPoly(self.data, pts =[contour], color=(255))
                
                if w*h > 7000:
                    cv2.floodFill(self.data, None, (x , y), 255)
                    
            contours = filtered_contours
            filtered_contours = []
            
            for contour1 in contours:
                inside = False
                
                for contour2 in contours:
                    if contour1.inside(contour2) and contour1.x != contour2.x and contour1.y != contour2.y and \
                       contour1.w != contour2.w and contour1.h != contour2.h:
                        inside = True
                        break
                
                if not inside:
                    exists = False
                        
                    for contour2 in filtered_contours:
                        if contour1.inside(contour2):
                            exists = True
                            break
                    
                    if not exists:
                        filtered_contours.append(contour1)
  
        return filtered_contours
    
    def plot_rectangles(self, rectangles, image=None):
        '''
        Plot rectangles in image.
        '''        
    
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to plot rectangles.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before plotting rectangles.")
                self.convert_to_gray(self.data)
            
            # Draw rectangles.
            image_data = cv2.cvtColor(self.data, cv2.COLOR_GRAY2RGB)
            
            for contour in rectangles:    
                cv2.rectangle(image_data, (contour.x, contour.y), (contour.x + contour.w, contour.y + contour.h), (0, 255, 0), 2)
            
            image = Image(image=image_data)
            return image.plot()
            
    def binarize(self, image=None, adaptative=False):
        '''
        Binarize image.
        '''
        
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to binarize image.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before binarizing image.")
                self.convert_to_gray(self.data)
            
            # Convert to binary.
            _, data2 = cv2.threshold(self.data, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            
            if adaptative:
                data1 = cv2.adaptiveThreshold(self.data, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, 2) 
                data1 = cv2.bitwise_not(data1)
                data2 = cv2.bitwise_not(data2)
                self.data = cv2.bitwise_and(data1, data2)
                self.data = cv2.bitwise_not(self.data)
            else:
                self.data = data2
                  
            for x in range(self.width // 3, self.width * 2 // 3):
                if x % 5 == 0:
                    cv2.floodFill(self.data, None, (x , 0), 255)
                    cv2.floodFill(self.data, None, (x , self.height - 1), 255)
                      
        return self.data

    def contrast(self, image=None):
        '''
        Increase image contrast.
        '''
        
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to contrast image.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before contrasting image.")
                self.convert_to_gray(self.data)
                
            alpha = 2
            self.data = self.data.astype(int)
            self.data = np.power(self.data, alpha)
            self.data = np.multiply(self.data, 1.0 / 255 ** (alpha -1))
            self.data = np.uint8(self.data)                
        return self.data
    
    def compute_edges(self, image=None):
        '''
        Compute edges.
        '''
        
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to compute edges.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before computing edges.")
                self.convert_to_gray(self.data)
            
            self.data = cv2.Canny(self.data,1 ,100)

        return self.data
    
    def compute_rectangles_for_plates(self, image=None):
        '''
        Compute rectangles in image.
        '''
        
        rectangles = []
        
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to compute rectangles.")
        elif image is not None:
            self._set_image_data(image)
            
        if self.data is not None:
            if len(self.data.shape) == 3:                
                self._logger.log(Logger.DEBUG, "We need to convert image to gray scale before computing rectangles.")
                self.convert_to_gray(self.data)
                
            thresh = cv2.Canny(self.data,1 ,100)
            contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            
            for contour in contours:    
                x, y, w, h = cv2.boundingRect(contour)
                
                min_size = 50
                ratio = w * 1.0 / h
                 
                if ratio > 1.8 and ratio < 5.0 and w > min_size and h > min_size:        
                    rect = Rect([x, y, w, h])
                    rect.haar_recognized = False
                    rectangles.append(rect)
    
        return rectangles
    
    def save(self, file_path=None, image=None):
        '''
        Save image in file path.
        '''
        
        if file_path is None and self.file_path is None:
            self._logger.log(Logger.ERROR, "There is no file path to save image.")
        elif file_path is not None:
            self.file_path = file_path
            
        if image is None and self.data is None:
            self._logger.log(Logger.ERROR, "There is no data to save image.")
        elif image is not None:
            self._set_image_data(image)
        
        if self.file_path is not None and self.data is not None:
            image = self.data
            
            if len(self.data.shape) == 3:
                image = cv2.cvtColor(self.data, cv2.COLOR_RGB2BGR)
                        
            self._logger.log(Logger.INFO, "Saving image in " + self.file_path)
            cv2.imwrite(self.file_path, image)
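A short usage sketch of the Image helper above (not from the original listing), assuming OpenCV and numpy are installed and that a license plate photo exists at the hypothetical path below.

image = Image("data/original/positive/car_001.jpg")
image.convert_to_gray()
image.smart_equalize()
candidate_plates = image.compute_rectangles_for_plates()
print("Candidate plate regions found:", len(candidate_plates))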
Example #20
def pre_processing(argv):
    '''
    Pre process image files to train detector.
    '''
    # Parses args.
    arg_parser = ArgumentParser(description='Load and plot image.')
    arg_parser.add_argument('-c',
                            '--config',
                            dest='config_file',
                            default='config.ini',
                            help='Configuration file')
    args = vars(arg_parser.parse_args())

    # Parses configuration file.
    config_parser = SafeConfigParser()
    config_parser.read(args['config_file'])
    path_original_positive = config_parser.get('data',
                                               'path_original_positive')
    path_original_negative = config_parser.get('data',
                                               'path_original_negative')
    path_pre_processed_positive = config_parser.get(
        'data', 'path_pre_processed_positive')
    path_pre_processed_negative = config_parser.get(
        'data', 'path_pre_processed_negative')
    path_training_vec = config_parser.get('data', 'path_training_vec')
    total_positive_files = config_parser.get('training',
                                             'total_positive_files')
    image_width = config_parser.get('training', 'image_width')
    image_height = config_parser.get('training', 'image_height')
    training_width = config_parser.get('training', 'training_width')
    training_height = config_parser.get('training', 'training_height')

    # Removing old files.
    logger = Logger()
    logger.log(Logger.INFO, "Removing old files.")
    files = Files(path_pre_processed_positive, True)
    files.remove()
    files = Files(path_pre_processed_negative, True)
    files.remove()
    files = Files(path_training_vec, True)
    files.remove()

    # Pre process positive images.
    logger.log(Logger.INFO, "Pre-processing positive images.")
    positive_images = Files(path_original_positive)

    for file_path in positive_images.paths:
        # Load image and configuration data.
        image = Image(file_path)
        config_data = ConfigData(Quadrilaterals(), file_path)
        config_data.read_data()

        # Convert and equalize image.
        image.convert_to_gray()
        image.equalize()

        # Get file name and extension.
        base_name = os.path.splitext(ntpath.basename(file_path))
        file_name = base_name[0]
        extension = base_name[1]

        index_quadrilateral = 0
        for quadrilateral in config_data.quadrilaterals.data:
            # Crop image using configuration data.
            license_plate = crop_image(image, quadrilateral, int(image_width),
                                       int(image_height))

            # Save image.
            save_image_path = os.path.abspath(path_pre_processed_positive +
                                              file_name + "_" +
                                              str(index_quadrilateral) +
                                              extension)
            license_plate.save(save_image_path)

            index_quadrilateral += 1

    # Pre process negative images.
    logger.log(Logger.INFO, "Pre-processing negative images.")
    negative_images = Files(path_original_negative)

    for file_path in negative_images.paths:
        # Load image.
        image = Image(file_path)

        # Convert and equalize image.
        image.convert_to_gray()
        image.equalize()

        # Get base name.
        base_name = ntpath.basename(file_path)

        # Save image.
        save_image_path = os.path.abspath(path_pre_processed_negative +
                                          base_name)
        image.save(save_image_path)

    # Save detector_training image paths in txt files.
    save_training_images_paths(path_pre_processed_positive,
                               path_pre_processed_negative)

    # Generating samples.
    logger.log(Logger.INFO, "Generating samples.")
    call([
        "perl", 'lib/generate_samples.pl', 'positive.txt', 'negative.txt',
        path_training_vec,
        str(int(total_positive_files) + 500),
        'opencv_createsamples -bgcolor 0 -bgthresh 0 -maxxangle 1.1 -maxyangle 1.1 -maxzangle 0.5 -maxidev 40 -w '
        + str(training_width) + ' -h ' + str(training_height)
    ])

    # Merging data for detector_training.
    path_training_vec_file = "positive.vec"
    logger.log(
        Logger.INFO,
        "Merging data for detector_training in: " + path_training_vec_file)
    merge_vec_files(path_training_vec, path_training_vec_file)
Example #21
class Image:
    '''
    Loads, processes, and saves image data.
    '''

    _logger = Logger()
    data = None
    path = None

    def __init__(self, path=None, data=None):
        self._set_image_parameters(path, data)

        if self.path is not None:
            self.load(self.path)

    def _set_image_data(self, data):
        '''
        Set image data.
        '''

        # Check if image is in rgb or gray scale
        if len(data.shape) == 3:
            # Image is in rgb.
            self.height, self.width, self.channels = data.shape
        else:
            # Image is in gray scale.
            self.height, self.width = data.shape
            self.channels = 1

        self.data = data

    def _set_image_parameters(self, path=None, data=None):
        '''
        Configure image.
        '''

        if path is not None:
            self.path = path

        if data is not None:
            self._set_image_data(data)

    def load(self, path):
        '''
        Load image from path.
        '''

        self._set_image_parameters(path=path)
        self.data = np.asarray(PILImage.open(self.path))

        return self.data

    def _configure_plot(self):
        '''
        Configure plot to display image.
        '''

        # Remove warning for Source ID not found.
        # The warning is an issue in matplotlib.
        import warnings
        warnings.simplefilter("ignore")

        # Configure pyplot.
        frame = plt.gca()
        frame.axes.get_xaxis().set_ticklabels([])
        frame.axes.get_yaxis().set_ticklabels([])

        is_gray = len(self.data.shape) < 3

        if is_gray:
            plt.imshow(self.data, cmap=plt.get_cmap("gray"))
        else:
            plt.imshow(self.data)

    def plot(self, image=None):
        '''
        Plot image.
        '''

        self._set_image_parameters(data=image)
        self._configure_plot()
        plt.show()

    def save(self, path=None, image=None):
        '''
        Save image.
        '''

        self._set_image_parameters(data=image, path=path)
        pil_image = PILImage.fromarray(self.data)
        pil_image.save(self.path)

    def get_level_data(self):
        '''
        Get level data from image.
        '''

        is_gray = len(self.data.shape) < 3

        if is_gray:
            data = self.data.reshape(1, len(self.data), len(self.data[0]))
        else:
            data = self.data.transpose(2, 0, 1)

        return data

    def equalize(self, image=None):
        '''
        Equalize image.
        '''

        if image is None and self.data is None:
            self._logger.log("There is no data to equalize image.",
                             Logger.ERROR)
        elif image is not None:
            self._set_image_data(image)

        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:
                self._logger.log(
                    "We need to convert image to gray scale before equalization.",
                    Logger.DEBUG)
                self.convert_to_gray(self.data)

            # Equalize image.
            self._logger.log("Equalizing image.")
            self.data = cv2.equalizeHist(self.data)

        return self.data

    def binarize(self, image=None):
        '''
        Binarize image.
        '''

        if image is None and self.data is None:
            self._logger.log("There is no data to binarize image.",
                             Logger.ERROR)
        elif image is not None:
            self._set_image_data(image)

        if self.data is not None:
            # Convert to gray scale if the image is in RGB.
            if len(self.data.shape) == 3:
                self._logger.log(
                    "We need to convert image to gray scale before binarizing image.",
                    Logger.DEBUG)
                self.convert_to_gray(self.data)

            # Binarize image.
            self._logger.log("Binarizing image.")
            self.data = cv2.medianBlur(self.data, 5)
            self.data = cv2.adaptiveThreshold(self.data, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
            cv2.THRESH_BINARY, 11, 2)

        return self.data

    def convert_to_gray(self, image=None):
        '''
        Convert rgb to gray.
        '''

        if image is None and self.data is None:
            self._logger.log("There is no data to convert image.",
                             Logger.ERROR)
        elif image is not None:
            self._set_image_data(image)

        if self.data is not None:
            # Convert image only if it is in rgb.
            if len(self.data.shape) == 3:
                self._logger.log("Converting image to gray scale.")
                self.data = cv2.cvtColor(self.data, cv2.COLOR_RGB2GRAY)
            else:
                self._logger.log("Image is already in gray scale.")

            self.channels = 1

        return self.data

    def resize(self, width, height=0, image=None):
        '''
        Resize image.
        '''

        if image is None and self.data is None:
            self._logger.log("There is no data to resize image.", Logger.ERROR)
        elif image is not None:
            self._set_image_data(image)

        if height == 0:
            r = width * 1.0 / self.data.shape[1]
            height = int(self.data.shape[0] * r)

        self._logger.log("Resizing image to: width = " + str(width) +
                         " height = " + str(height))
        resized = cv2.resize(self.data, (width, height),
                             interpolation=cv2.INTER_AREA)
        return Image(data=resized)

    def crop(self, origin, end, image=None):
        '''
        Crop image.
        '''

        if image is None and self.data is None:
            self._logger.log("There is no data to crop image.", Logger.ERROR)
        elif image is not None:
            self._set_image_data(image)

        if self.data is not None:
            # Correct parameters.
            if origin.x >= self.width:
                origin.x = self.width - 1
            elif origin.x < 0:
                origin.x = 0

            if end.x >= self.width:
                end.x = self.width - 1
            elif end.x < 0:
                end.x = 0

            if origin.y >= self.height:
                origin.y = self.height - 1
            elif origin.y < 0:
                origin.y = 0

            if end.y >= self.height:
                end.y = self.height - 1
            elif end.y < 0:
                end.y = 0

            if origin.x > end.x:
                change = end.x
                end.x = origin.x
                origin.x = change

            if origin.y > end.y:
                change = end.y
                end.y = origin.y
                origin.y = change

            self._logger.log("Cropping image. Origin: (%d, %d) End: (%d, %d)" \
                % (origin.x, origin.y, end.x, end.y))
            return Image(data=self.data[origin.y:end.y, origin.x:end.x])

    def convert_to_hsv(self, image=None):
        '''
        Convert rgb to hsv.
        '''

        if image is None and self.data is None:
            self._logger.log("There is no data to convert image.",
                             Logger.ERROR)
        elif image is not None:
            self._set_image_data(image)

        if self.data is not None and len(self.data.shape) == 3:
            self.data = cv2.cvtColor(self.data, cv2.COLOR_RGB2HSV)

        return self.data

    def equalize_clahe(self, image=None):
        '''
        Equalize image.
        '''

        if image is None and self.data is None:
            self._logger.log("There is no data to equalize image.",
                             Logger.ERROR)
        elif image is not None:
            self._set_image_data(image)

        if self.data is not None:
            # Equalize image.
            self._logger.log("Equalizing image.")
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

            if len(self.data.shape) == 3:
                self.data = cv2.cvtColor(self.data, cv2.COLOR_RGB2YCR_CB)
                data = np.zeros(self.data.shape)

                data[:, :, 0] = clahe.apply(self.data[:, :, 0])
                data[:, :, 1] = clahe.apply(self.data[:, :, 1])
                data[:, :, 2] = clahe.apply(self.data[:, :, 2])
                self.data = cv2.cvtColor(data.astype(np.uint8),
                                         cv2.COLOR_YCR_CB2RGB)
            else:
                self.data = clahe.apply(self.data)

        return self.data
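
A minimal usage sketch for the preprocessing methods above. It assumes only what the snippet itself shows: an Image(data=...) constructor (the one used by resize() and crop()) fed with an RGB numpy array, and methods that return the processed array. The input file name is a placeholder.

import cv2

# Load a test picture and convert BGR (OpenCV default) to the RGB layout
# the class expects; 'car.jpg' is a hypothetical path.
rgb = cv2.cvtColor(cv2.imread('car.jpg'), cv2.COLOR_BGR2RGB)

image = Image(data=rgb)
image.convert_to_gray()      # single-channel data, sets channels = 1
image.equalize()             # global histogram equalization
binary = image.binarize()    # median blur + adaptive threshold, returns ndarray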
Beispiel #22
0
def crop_image(image, quadrilateral, width_dest, height_dest):
    '''
    Crop image using parameters of quadrilateral.
    '''
    # Find crop parameters.
    logger = Logger()
    logger.log(Logger.INFO, "Finding crop parameters.")
    origin = Point(image.width, image.height)
    end = Point(0, 0)

    for point in quadrilateral.points:
        if point.x > end.x:
            end.x = point.x

        if point.y > end.y:
            end.y = point.y

        if point.x < origin.x:
            origin.x = point.x

        if point.y < origin.y:
            origin.y = point.y

    # Adjust the crop so its height/width ratio matches the destination
    # dimensions (height_dest / width_dest).
    scale = height_dest * 1.0 / width_dest
    height = end.y - origin.y
    width = end.x - origin.x

    # Check if we will change height or width.
    if width * scale < height:
        # We need to increase width.
        increment = height * 2.5 - width
        end.x += increment / 2

        if end.x >= image.width:
            increment -= image.width - 1 - end.x
            end.x = image.width - 1
        else:
            increment /= 2

        origin.x -= increment

        if origin.x < 0:
            increment = -origin.x
            origin.x = 0
            end.x += increment

            if end.x >= image.width:
                end.x = image.width - 1

        logger.log(Logger.INFO,
                   "Increasing width by " + str(end.x - origin.x - width))
    else:
        # We need to increase height.
        increment = width * scale - height
        end.y += increment / 2

        if end.y >= image.height:
            increment -= image.height - 1 - end.y
            end.y = image.height - 1
        else:
            increment /= 2

        origin.y -= increment

        if origin.y < 0:
            increment = -origin.y
            origin.y = 0
            end.y += increment

            if end.y >= image.height:
                end.y = image.height - 1

        logger.log(Logger.INFO,
                   "Increasing height by " + str(end.y - origin.y - height))

    # Crop image using points from configuration data.
    cropped = image.crop(Point(origin.x, origin.y), Point(end.x, end.y))

    # Resize image.
    resized = cropped.resize(width_dest, height_dest)

    return resized
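
A hedged usage sketch for crop_image(). The Point(x, y) constructor and the quadrilateral's .points attribute are taken from the code above; the Quadrilateral constructor, the Image(path=...) loader and all coordinate values are assumptions for illustration only.

# Four detected plate corners (hypothetical values).
corners = [Point(120, 300), Point(430, 308), Point(428, 372), Point(118, 366)]
quad = Quadrilateral(corners)              # assumed constructor exposing .points
frame = Image(path='frame_0001.jpg')       # assumed path-based constructor
plate = crop_image(frame, quad, width_dest=100, height_dest=20)
plate.save(path='plate_0001.jpg')          # save() signature shown earlier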
Beispiel #23
0
from models.path import get_obj_path
from models.logger import Logger
import os
import sys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import subprocess
from screenshot import Screenshot

model = None
ifInstallMonkey = None
ifRunMonkey = None

logger = Logger(logger="function").getlog()

sep = os.path.sep  # path separator of the current operating system
timeout = 20
PATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))

path = get_obj_path()


def excepTion(function):
    """
    异常装饰器,用来替代try except
    """
    def wrapper(*args, **keyargs):
        try:
            return function(*args, **keyargs)
        except Exception:
            # Assumed completion: the original snippet is truncated here; a
            # typical body logs the failure and lets the wrapper return None.
            logger.exception("Exception raised in %s" % function.__name__)

    return wrapper
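
A short usage sketch for the decorator, assuming it logs the exception and lets the wrapped call return None. The adb_devices helper and its shell command are hypothetical; subprocess is already imported in this module.

@excepTion
def adb_devices():
    # Any exception raised here is caught and logged by the decorator
    # instead of propagating to the caller.
    return subprocess.check_output(['adb', 'devices']).decode('utf-8')

devices = adb_devices()   # None if the adb call failed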
Beispiel #24
0
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    logger = Logger(opt)

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    # Build the training and validation data loaders.
    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)
    trainer = CtTrainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    TASK = 'ctdet'  # or 'multi_pose' for human pose estimation
    best_val_loss = 1e10
    best_ap = 1e-10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch
        log_dict_train, _ = trainer.train(epoch, train_loader)
        print("log_dict_train is: ", log_dict_train)
        # logger.write('epoch: {} |'.format(epoch))
        # for k, v in log_dict_train.items():
        #     logger.scalar_summary('train_{}'.format(k), v, epoch)
        #     logger.write('{} {:8f} | '.format(k, v))
        # Periodically validate, run the VOC-style evaluation and keep the
        # checkpoints with the best validation loss and best mAP.
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
                val_loader.dataset.run_eval(preds, opt.save_dir)
                result_json_pth = os.path.join(opt.save_dir, "results.json")
                anno_json_pth = '/home/pcl/pytorch_work/my_github/centernet_simple/data/dianli/annotations/test.json'
                ap_list, map = trainer.run_epoch_voc(result_json_pth,
                                                     anno_json_pth,
                                                     score_th=0.01,
                                                     class_num=opt.num_classes)
                print(ap_list, map)
            # for k, v in log_dict_val.items():
            #     logger.scalar_summary('val_{}'.format(k), v, epoch)
            #     logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] <= best_val_loss:
                best_val_loss = log_dict_val[opt.metric]
                save_model(
                    os.path.join(
                        opt.save_dir, 'model_best_val_loss_' +
                        str(round(best_val_loss, 2)) + '.pth'), epoch, model)
            if map > best_ap:
                best_ap = map
                save_model(
                    os.path.join(
                        opt.save_dir,
                        'model_best_map_' + str(round(best_ap, 2)) + '.pth'),
                    epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        # logger.write('\n')
        # Decay the learning rate at the configured epochs.
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr