Example #1
def main(stream_url, image_path, location):
    print(stream_url, image_path, location)

    cfg = ConfigParser()
    cfg.read(str(CFG_PATH))
    token, chat_id = cfg['tg']['token'], cfg['tg']['chat_id']
    password, water = cfg['enc']['password'], cfg['enc']['water']
    # Extract the stream's IP address and prepend it to the watermark text
    ip = IP_RE.findall(stream_url)[0]
    water = '%s %s' % (eip4(ip), water)

    url = 'https://api.telegram.org/bot%s/sendphoto' % token

    data = dict(chat_id=chat_id,
                caption='```%s```' % stream_url,
                parse_mode='Markdown')

    with open(image_path, 'rb') as f:
        img: Image.Image = Image.open(f)
        w, h = img.size

        text = ' '.join(location)
        print(w, h)
        font_size = int(0.04 * min(h, w))
        px = int(0.005 * w)
        py = int(0.002 * h)
        font = ImageFont.truetype(str(FONT_PATH), font_size)
        draw = ImageDraw.Draw(img, 'RGBA')
        _, text_height = draw.textsize('Wg', font)
        water_width, _ = draw.textsize(water, font)

        text_y = h - py - text_height

        draw.rectangle((0, text_y - py, w, h), fill=(0, 0, 0, 160))

        draw.text((px, text_y), text, fill='yellow', font=font)
        draw.text((w - px - water_width, text_y),
                  water,
                  fill=(255, 255, 255, 128),
                  font=font)

        img_byte_arr = io.BytesIO()
        img.save(img_byte_arr, format='PNG')
        img_byte_arr = img_byte_arr.getvalue()

        response = requests.post(url, data=data, files={'photo': img_byte_arr})

    print(response.json())
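This snippet relies on module-level names that are not shown: CFG_PATH, FONT_PATH, IP_RE, eip4 and the PIL/requests imports. One possible setup, purely as an assumption to make the example self-contained (in particular, eip4 is guessed here to be a simple IP-masking helper):

import io
import re
from configparser import ConfigParser
from pathlib import Path

import requests
from PIL import Image, ImageDraw, ImageFont

CFG_PATH = Path(__file__).parent / 'config.ini'         # assumed config location
FONT_PATH = Path(__file__).parent / 'fonts/arial.ttf'    # any TrueType font works
IP_RE = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}')           # first IPv4 address in the URL


def eip4(ip):
    # Assumption: obfuscate the address by keeping only the first two octets
    a, b, *_ = ip.split('.')
    return f'{a}.{b}.x.x'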
Example #2
def get_code(request):
    mode = 'RGB'
    size = (200, 100)

    def _get_color():
        return random.randrange(255)

    def _generate_code():
        source = 'asdjcfboiawuehrbgtfoui21345asdcasdc'
        code = ''
        for i in range(4):
            code += random.choice(source)
        return code

    red = _get_color()
    green = _get_color()
    blue = _get_color()
    color_bg = (red, green, blue)

    image = Image.new(mode=mode, size=size, color=color_bg)
    image_draw = ImageDraw.Draw(image, mode=mode)
    image_font = ImageFont.truetype(settings.FONT_PATH, 100)

    verify_code = _generate_code()

    request.session['verify_code'] = verify_code

    for i in range(len(verify_code)):
        image_draw.text(xy=(40 * i, 0), text=verify_code[i], font=image_font)

    # Sprinkle random noise points over the image
    for i in range(1000):
        fill = (_get_color(), _get_color(), _get_color())
        xy = (random.randrange(201), random.randrange(100))
        image_draw.point(xy=xy, fill=fill)

    fp = BytesIO()
    image.save(fp, 'png')

    return HttpResponse(fp.getvalue(), content_type='image/png')
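The view assumes the usual imports (random, io.BytesIO, PIL's Image/ImageDraw/ImageFont, django.conf.settings, django.http.HttpResponse). To use get_code as a captcha endpoint it would typically be wired into urls.py and checked against the session value when the form is submitted. A minimal sketch, assuming an app package named app (not part of the original):

# urls.py (hypothetical)
from django.urls import path
from app import views

urlpatterns = [
    path('verify_code/', views.get_code, name='verify_code'),
]


# in the form-handling view (hypothetical)
def check_code(request, submitted_code):
    expected = request.session.get('verify_code', '')
    return bool(expected) and submitted_code.lower() == expected.lower()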
Example #3
def _create_image_for_pair(rows, model_m, pair):
    length = len(rows)
    if length > 10:
        return

    w, h = 480, 480
    distance = 30
    interval = int((w - distance) / (length + 3))  # Spacing between the arrows

    # Take the endpoint with the largest magnitude as the scaling reference
    if abs(float(rows[0][1])) > abs(float(rows[-1][1])):
        max_value = float(rows[0][1])
    else:
        max_value = float(rows[-1][1])

    min_value = str(round(float(rows[-1][1]), 2))  # last row's value, used to label its tick mark

    h_scale = h - 80  # Leave room at the bottom of the plot for the labels

    for row in rows:
        # Scale each value into half of the plot height; flip the sign when the
        # reference value is negative so the bars keep their direction on screen
        scale = (h_scale / 2 - 10) / max_value
        if max_value < 0:
            row[1] = int(float(row[1]) * scale * (-1))
        else:
            row[1] = int(float(row[1]) * scale)

    im = Image.new('RGB', (w, h), (195, 197, 200))
    na = np.array(im)

    h_begin = int(h_scale / 2)
    # Axes
    na = cv2.arrowedLine(na, (3, h_begin), (w - 5, h_begin), (0, 0, 0), 4)
    na = cv2.arrowedLine(na, (distance, h - 50), (distance, 5), (0, 0, 0), 4)

    h_end = int(h_scale / 2 - (rows[-1][1]))
    na = cv2.line(na, (15, h_end), (45, h_end), (0, 0, 0), 4)

    distance += interval * 2
    for row in rows:

        h_end = int(h_scale / 2 - (row[1]))
        na = cv2.arrowedLine(na, (distance, h_begin), (distance, h_end),
                             (0, 0, 0), 4)
        distance += interval

    path = f'{MEDIA_ROOT}/files/models/{str(model_m.model.id)}/original_snod/modification/{str(model_m.id)}/images/{str(pair.id)}.png'
    Image.fromarray(na).save(path)

    # Add the labels
    img = Image.open(path)
    idraw = ImageDraw.Draw(img)
    path_font = f'{MEDIA_ROOT}/fonts/9041.ttf'
    font = ImageFont.truetype(path_font, size=18)

    distance = 30
    distance += interval * 2
    for row in rows:
        text = str((row[0]) + 1)
        if float(row[1]) > 0:
            idraw.text((distance, int(h_scale / 2 + 50)),
                       text,
                       font=font,
                       fill='#000000')
        elif float(row[1]) == 0:
            idraw.ellipse(
                [distance - 10, h_begin - 10, distance + 10, h_begin + 10],
                fill='#000000')
            idraw.text((distance, int(h_scale / 2 - 50)),
                       text,
                       font=font,
                       fill='#000000')
        else:
            idraw.text((distance, int(h_scale / 2 - 50)),
                       text,
                       font=font,
                       fill='#000000')
        distance += interval

    text = pair.option_1.name
    length = len(text) * 9  # rough text width: about 9 px per character at this size
    idraw.text((w - 15 - length, h - 40),
               pair.option_2.name,
               font=font,
               fill='#000000')
    idraw.text((15, h - 40), text, font=font, fill='#000000')

    idraw.text((w - 45, h / 2), 'Ox', font=font, fill='#000000')
    idraw.text((60, 15), 'Oy', font=font, fill='#000000')

    # Label the tick marks
    h_end = int(h_scale / 2 - (rows[-1][1]))

    idraw.text((45, h_end), min_value, font=font, fill='#000000')

    img.save(path)
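The vertical placement above reduces to a single mapping: each value is scaled into roughly half of h_scale (minus a 10 px margin) and subtracted from the axis midline, so positive values land above the Ox axis and negative ones below. A standalone sketch of that mapping, with an illustrative name not taken from the original:

def value_to_y(value, max_value, h_scale):
    # Scale into half the drawable height, leaving a small margin,
    # and flip the sign when the reference value is negative
    scale = (h_scale / 2 - 10) / max_value
    if max_value < 0:
        scale = -scale
    return int(h_scale / 2 - value * scale)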
#coding=utf-8
from PIL import ImageFont

import HyperLPRLite as pr
import cv2
import numpy as np

# Font used to draw the recognized (Chinese) plate text
fontC = ImageFont.truetype("Font/platech.ttf", 14, 0)

# Read an image and run recognition; return [plate string, confidence, position]
# for every plate whose confidence exceeds the threshold.
# smallest_confidence: minimum confidence required to keep a result
def recognize_plate(image, smallest_confidence=0.7):
    # Pre-trained model files
    model = pr.LPR("model/cascade.xml", "model/model12.h5", "model/ocr_plate_all_gru.h5")
    return_all_plate = []
    for pstr, confidence, rect in model.SimpleRecognizePlateByE2E(image):
        if confidence > smallest_confidence:
            return_all_plate.append([pstr, confidence, rect])  # plate string, confidence, position
    return return_all_plate

test_image = cv2.imread("Images/16.jpg")  # load the test image
print(recognize_plate(test_image))  # print the results: plate string, confidence, position
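fontC is loaded above but never used in this snippet; it is presumably meant for drawing the recognized plate text, since cv2.putText cannot render Chinese characters. A minimal sketch of such a helper, assuming each rect is an (x, y, w, h) box and the image is an OpenCV BGR array:

from PIL import Image, ImageDraw


def draw_plate_results(image_bgr, results):
    # results: output of recognize_plate(), i.e. [plate string, confidence, rect]
    for pstr, confidence, rect in results:
        x, y, w, h = [int(v) for v in rect]  # assumed (x, y, w, h) layout
        cv2.rectangle(image_bgr, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # Draw the text with PIL so CJK glyphs render correctly
        pil_img = Image.fromarray(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(pil_img)
        draw.text((x, max(y - 18, 0)), '%s %.2f' % (pstr, confidence), font=fontC, fill=(255, 0, 0))
        image_bgr = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
    return image_bgr


# Example: save an annotated copy of the test image
cv2.imwrite("Images/16_result.jpg", draw_plate_results(test_image, recognize_plate(test_image)))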