Example #1
def send_user_Msg(command, data=None):
    """Send a Telegram alert to the owner; `command` selects the alert type."""
    import telepot
    global owner_chat_id
    bot = telepot.Bot(my_bot_token)

    if command == 'failed_attempts':
        bot.sendMessage(owner_chat_id, '3 failed attempts have been detected!')
        # Capture and send the latest picture, closing the file handle afterwards.
        with open("/home/pi/motion_captures/" + camera(dateTimefunc()), 'rb') as photo:
            bot.sendPhoto(owner_chat_id, photo=photo)
        print('Latest picture has been sent.')
    elif command == 'success':
        bot.sendMessage(owner_chat_id, data + ' is at home.')
    elif command == 'uauth_user':
        bot.sendMessage(owner_chat_id, 'Unauthorised user of ' + data + '\'s RFID card!')
        with open("/home/pi/motion_captures/" + camera(dateTimefunc()), 'rb') as photo:
            bot.sendPhoto(owner_chat_id, photo=photo)
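
Note: this helper assumes module-level state (my_bot_token, owner_chat_id) and the project's camera()/dateTimefunc() helpers. A minimal, hypothetical sketch of that surrounding setup and a call site (the token and chat id values below are placeholders):

# Hypothetical module-level configuration assumed by send_user_Msg above;
# the real project defines these values elsewhere.
my_bot_token = '123456:ABC-placeholder-token'   # Telegram bot token (placeholder)
owner_chat_id = 123456789                       # owner's Telegram chat id (placeholder)

# Example calls:
# send_user_Msg('failed_attempts')
# send_user_Msg('success', data='Alice')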
Example #2
def rfid_checker():
    global prog_run
    print "checking rfid\n"
    lcd_display(["Please present", "RFID card"])
    attempt = 0
    username = None
    success = 'No'
    date_time = ''
    attempt_img = ''
    while username is None or '[]' in str(username) or success == 'No':
        # check for number of attempts
        # if over 3 failed attempts ring buzzer
        if attempt < 4:
            attempt = attempt + 1
            rfid_id = rfid_reader()
            # call motion_sensor function if no card in 60 seconds
            if rfid_id == "timeout":
                print("rfid read timeout!")
                prog_run = False
                break
            else:
                date_time = str(datetime.now().replace(microsecond=0)).replace(
                    " ", "_")
                username = query_rfid(rfid_id)
                print(username)
                if username is None or '[]' in str(username):
                    if attempt < 4:
                        lcd_display(["Please try", "again!"])
                else:
                    lcd_display(["Please wait.", "Scanning face."])
                    attempt_img = camera(date_time)
                    s3_imgstore(attempt_img)
                    sleep(10)
                    face_matched, response = s3RekogniseFace(
                        username, attempt_img)
                    if face_matched:
                        success = 'Yes'
                        print("User " + username +
                              " has authenticated successfully!")
                    else:
                        lcd_display(["Unauthorised", "User!"])
                        print("WARNING - UNAUTHORIZED USER is using " +
                              username + "\'s card! ")
                        telebotalert("uauth_user", username)
                attempt_logger(attempt_img, date_time, rfid_id, username,
                               success)
        elif attempt == 4:
            telebotalert("failed_attempts", '')
            lcd_display(["UNAUTHORIZED!", "ALERT TRIGGERED!"])
            buzzer()  # make buzzer sound
            prog_run = False
            break
    print "username is => " + str(username)
    if success == 'Yes':
        ledControl('off', 'on')
        telebotalert("success", username)
        lcd_display(["Welcome home", str(username) + "!"])
        sleep(5)
        prog_run = False
Example #3
def add_motion_to_db(light_val, motion_sig, date_time):
    print "adding motion to db\n"
    motion_img = camera(date_time)
    message = {
        "deviceid": deviceid,
        "motion_datetime": date_time,
        "lightvalue": light_val,
        "motionvalue": motion_sig,
        "motion_image": motion_img
    }
    publisher("sensors/lightmotionvalimg", message)
Example #4
def main():
    mouseController = MouseController()

    with camera() as cam:
        while True:
            ok, read_ = cam.read()
            if not ok:
                break

            frame = cv2.flip(read_, 1)
            roi = frame[ROI_SLICE].copy()

            key = cv2.waitKey(2)
            if key == ord('r'):
                print('Background reset')
                setBackground(roi)
                continue

            elif key in (27, ord('q')):
                print('Exit')
                break

            mask = getMask(roi)
            contour = getHandContour(roi, mask)
            if contour is None or cv2.contourArea(contour) < MIN_HAND_SIZE:
                continue

            bottommost, leftmost, topmost, _rightmost = getHandPosition(
                contour)
            drawHandPositions(roi, bottommost, leftmost, topmost)
            angle = getAngle(bottommost, leftmost, topmost)
            if angle is None:
                continue

            mouseController.performActions(roi, topmost, angle)
            showResult(frame, roi)
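
setBackground() and getMask() come from the project's utils module and are not shown here. A plausible minimal stand-in based on simple background subtraction (not necessarily what utils actually implements):

import cv2

_background = None  # hypothetical module-level store for the reference frame

def setBackground(roi):
    # Remember a blurred grayscale copy of the empty scene.
    global _background
    _background = cv2.GaussianBlur(cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY), (21, 21), 0)

def getMask(roi, threshold=25):
    # Difference the current frame against the stored background and binarise.
    gray = cv2.GaussianBlur(cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY), (21, 21), 0)
    diff = cv2.absdiff(_background, gray)
    _, mask = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
    return mask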
Example #5
camera_rotation_n_frames = 100

# ─── TITLE ─────────────────────────────────────────────────────────────────────

print("STAGE: title")

stage_title = bpy.data.objects.new("stage_title", None)
stage_title.rotation_euler = Vector((0, 0, np.pi / 2))
stage_title.location = Vector((-20, 0, 0))
scene.objects.link(stage_title)

stage_title["start"] = 0

# add camera and initially look at title stage
camera = utils.camera((0, 0, 0), lens=20)
utils.look_at(camera, stage_title)
camera.keyframe_insert("rotation_euler", frame=0)
camera.keyframe_insert("location", frame=0)
camera.data.keyframe_insert("lens", frame=0)

run("01-title.py")

if "end" not in stage_title: stage_title["end"] = stage_title["start"]
camera.keyframe_insert("rotation_euler", frame=stage_title["end"])

# ─── EMBEDDINGS ─────────────────────────────────────────────────────────────────

print("STAGE: embeddings")

# create stage
Example #6
import bpy
import numpy as np
import pandas as pd

import json

import funky
import utils

### PREPARING THE DATA ###


### SETTING THE SCENE ###

# Create lamp
target = utils.target((2/3 * w_overall, h_overall/2, d_overall*3.4/4))
lamp = utils.lamp((w_overall + 10, -20, 50), target=target, type='AREA', shadow = True)
lamp.data.distance = 0.02
lamp.data.energy = 0.0001
lamp.scale = [100, 100, 1]

# Choose either camera
# orthographic camera
# target = utils.target((w_overall/2, h_overall/2, d_overall*3.4/4))
# camera = utils.camera((3/4 * w_overall, -30, 50), target = target, type='ORTHO', ortho_scale=w_overall * 1.2)

# perspective camera
target = utils.target((w_overall/2, h_overall/2, d_overall*3.4/4))
camera = utils.camera((3/4 * w_overall, -30, 50), target = target, lens = 20)
Example #7
                        help='\
                        first index refers to gender model, second index refers\
                        to expression model, and third index refers to multiple models'
                        )

    return parser.parse_args()


args = args()

show = args.show
tracking = args.tracking

inputs_path = args.inputs_path
outputs_path = args.outputs_path
models_path = args.models_path

delay = args.delay
bool_gender, bool_expression, bool_multiple = args.models

cam_num = args.cam_num

video_name = args.video_name

if inputs_path == "":
    camera(cam_num, outputs_path, video_name, models_path, bool_expression,
           bool_gender, bool_multiple, show, tracking)
else:
    input_dir(inputs_path, outputs_path, models_path, bool_expression,
              bool_gender, bool_multiple, delay, show, tracking)
Example #8
import cv2
import numpy as np

from utils import camera, setBackground

with camera() as _cam:
    bg = cv2.flip(_cam.read()[1], 1)
    w = np.shape(bg)[1]
    h = np.shape(bg)[0]

DEBUG = True
SCREEN_RESOLUTION = 1920, 1080

# RECT (top left: x, y, bottom right: x, y)
SCREEN_RECT = (300, 1, w - 40, h - 280)
ROI_RECT = (250, 1, w - 1, h - 200)

HAND_WIDTH = 80
BOTTOM_OFFSET = 25
MIN_HAND_SIZE = 90
MAX_CLICK_ANGLE = 15

ROI_SLICE = slice(ROI_RECT[1], ROI_RECT[3]), slice(ROI_RECT[0], ROI_RECT[2])
sx, sy = SCREEN_RESOLUTION


class Colors:
    YELLOW = (0, 255, 255)
    BLUE = (255, 0, 0)
    ORANGE = (0, 120, 255)
    MAGENTA = (255, 0, 255)
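
To show how these constants relate (the helper below is a hypothetical sketch, not part of the project): ROI_SLICE crops the working region out of each flipped frame (roi = frame[ROI_SLICE]), and a point inside SCREEN_RECT can be scaled to the SCREEN_RESOLUTION desktop:

def roi_point_to_screen(x, y):
    # Hypothetical helper: map a point inside SCREEN_RECT to desktop coordinates.
    left, top, right, bottom = SCREEN_RECT
    px = (x - left) / float(right - left)
    py = (y - top) / float(bottom - top)
    return int(px * sx), int(py * sy)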
Example #9
    metaball.resolution = 0.2
    metaball.render_resolution = 0.05

    for _ in range(n):
        # Offset each element by a random amount in [-r0, r0] on every axis.
        location = Vector(origin) + Vector(
            random.uniform(-r0, r0) for _ in range(3))

        element = metaball.elements.new()
        element.co = location
        element.radius = r1

    return metaball


if __name__ == '__main__':
    # Remove all elements
    utils.removeAll()

    # Create camera
    target = utils.target()
    camera = utils.camera((-10, -10, 10), target)

    # Create lamps
    utils.rainbowLights(10, 300, 3)

    # Create metaball
    metaball = createMetaball()

    # Render scene
    utils.renderToFolder('rendering', 'metaballs', 500, 500)
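
Several of the Blender examples above call a utils.camera() helper. A rough sketch of what such a helper could look like, inferred only from how it is called here (Blender 2.7x API, as implied by scene.objects.link); the project's actual utils module may differ:

import bpy

def camera(origin, target=None, lens=35, type='PERSP', ortho_scale=6):
    # Create camera data and an object holding it at the requested origin.
    cam = bpy.data.cameras.new("Camera")
    cam.lens = lens
    cam.type = type  # 'PERSP', 'ORTHO' or 'PANO'
    if type == 'ORTHO':
        cam.ortho_scale = ortho_scale
    obj = bpy.data.objects.new("CameraObj", cam)
    obj.location = origin
    bpy.context.scene.objects.link(obj)
    bpy.context.scene.camera = obj
    # Aim the camera at the target object, if one was given.
    if target is not None:
        constraint = obj.constraints.new('TRACK_TO')
        constraint.target = target
        constraint.track_axis = 'TRACK_NEGATIVE_Z'
        constraint.up_axis = 'UP_Y'
    return obj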
Example #10
def respondToMsg(msg):
    print(msg)
    file_id = ''
    chat_id = msg['chat']['id']
    command = (msg.get('text', 'msg_is_img')).lower()
    if command == 'msg_is_img':
        file_id = msg['photo'][0]['file_id']
    global display
    global guest_chat_id
    global proxy_chat
    global proxy_chat_status
    global owner_chat_id

    print('Got chat_id: {}'.format(chat_id))
    print('Got command: {}'.format(command))

    if 'Active' in proxy_chat_status:
        if chat_id == guest_chat_id:
            if 'exit()' in command:
                bot.sendMessage(owner_chat_id, 'Guest has left the chat.')
                bot.sendMessage(guest_chat_id, 'You have left the chat.')
                proxy_chat = False
                proxy_chat_status = 'Inactive'
                guest_chat_id = 0
            else:
                bot.sendMessage(owner_chat_id, command)
        elif chat_id == owner_chat_id:
            if 'exit()' in command:
                bot.sendMessage(guest_chat_id, 'Owner has left the chat.')
                bot.sendMessage(owner_chat_id, 'You have left the chat.')
                proxy_chat = False
                proxy_chat_status = 'Inactive'
                guest_chat_id = 0
            elif 'guestid' in command:
                bot.sendMessage(owner_chat_id, 'Current guest chat id is {}'.format(guest_chat_id))
            else:
                bot.sendMessage(guest_chat_id, command)
    elif proxy_chat and 'Inactive' in proxy_chat_status:
        if 'yes' in command.lower():
            bot.sendMessage(owner_chat_id, 'Connecting you to guest now...\n'+
            'To end convo with guest, please type \'exit()\'.\n'+
            'Type guestid to get the chat id of the current guest.')
            bot.sendMessage(guest_chat_id, 'Owner has accepted your chat request.\n'+
            'To end convo with owner, please type \'exit()\'.')
            proxy_chat_status = 'Active'
        elif 'no' in command.lower():
            bot.sendMessage(owner_chat_id, 'Communication request by guest declined.')
            bot.sendMessage(guest_chat_id, 'Communication request declined by owner.')
            guest_chat_id = 0
            proxy_chat = False
    # display help message to users
    elif command == '/help':
        bot.sendMessage(chat_id, 'Hello!\nI am a surveillance camera bot')
        if chat_id == owner_chat_id:
            bot.sendMessage(owner_chat_id, 'Here is a list of available commands you can ask me to do:\n'+
            '/help - displays this help message.\n'+
            'pic / picture / image - takes a real time picture.\n'+
            'display / lcd - displays a message on the lcd screen.\n'+
            'alarm - sounds the alarm.\n'+
            'chat_id - gets your chat id with the bot.')
        else:
            bot.sendMessage(chat_id, 'Here is a list of available commands you can ask me to do:\n'+
            'chat_id - gets your chat id with the bot.\n'+
            'talk / owner / msg / message - talk to the owner.')
    # for all the owner allowed commands
    elif chat_id == owner_chat_id:
        if 'pic' in command or 'picture' in command or 'image' in command:
            print('Taking an image')
            # Capture and send a real-time picture, closing the file handle afterwards.
            with open("/home/pi/motion_captures/" + camera(dateTimefunc()), 'rb') as photo:
                bot.sendPhoto(owner_chat_id, photo=photo)
            bot.sendMessage(owner_chat_id, 'This is the real time ' + command)
        # for owner to display text on lcd with text split into 2 lines using newline
        # as a delimiter
        elif display:
            txt = command.split("\n")
            if len(txt) == 1:
                txt.append('')
            if 'exit()' in txt[0]:
                display = False
                bot.sendMessage(owner_chat_id, 'Exited lcd display mode.')
            else:
                lcd_display(txt)
        # for owner to display text on lcd display
        elif 'display' in command or 'lcd' in command:
            bot.sendMessage(owner_chat_id,
            'Please enter what you want to say to the visitor.\n' +
            'Only 16 characters per line; Maximum of 2 lines.\n' +
            'Press the enter key to split your strings into two lines\n' +
            'Eg.\n Hello\nWorld\n'+
            'Type exit() to exit sending text to lcd display.')
            display = True
        # for owner to sound the alarm
        elif 'alarm' in command:
            bot.sendMessage(owner_chat_id, 'Sounding the alarm now!\n'+
            'Please wait for alarm to stop ringing before inputting any more commands.')
            buzzer()
            bot.sendMessage(owner_chat_id, 'Alarm has finished ringing.')
        elif ('talk' in command or "owner" in command or 'msg' in command or 'message' in command):
            bot.sendMessage(owner_chat_id, 'Displaying proxy chat instructions to guest')
            for x in range(2):
                lcd_display(['Hello guest!', 'Please use the'])
                sleep(2)
                lcd_display(['Telegram bot', 'monicam_bot'])
                sleep(2)
                lcd_display(['to communicate', 'with the owner'])
                sleep(2)
                lcd_display(['Start the bot', 'and type chat'])
                sleep(2)
                lcd_display(['to chat with', 'the owner'])
                sleep(2)
                lcd_display(['clear'])
        elif 'chat_id' in command:
            bot.sendMessage(chat_id, 'Your chat id is {}'.format(chat_id))
        # for all the other commands not here
        else:
            bot.sendMessage(chat_id, 'Unknown command. Please try again.\n' +
            'For a list of available commands, type /help.')
    # for getting the chat id of the current user
    elif 'chat_id' in command:
        bot.sendMessage(chat_id, 'Your chat id is {}'.format(chat_id))
    # for guest to communicate with the owner
    elif ('talk' in command or "owner" in command or 'msg' in command or 'message' in command) and chat_id != owner_chat_id:
        # if chat_id == owner_chat_id:
        #     bot.sendMessage(owner_chat_id, 'Enter the chat id of the guest.')
        guest_chat_id = chat_id
        bot.sendMessage(owner_chat_id, 'guest with chat_id ' + str(guest_chat_id) +
        ' wants to talk to you. Do you accept or not? (Yes or No)')
        proxy_chat = True
    # for all the other commands not here
    else:
        bot.sendMessage(chat_id, 'Unknown command. Please try again.\n' +
        'For a list of available commands, type /help.')
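
For context, a telepot handler like respondToMsg above is normally registered through telepot's message loop; a minimal, hypothetical wiring is shown below (the token is a placeholder, and the other globals the handler uses are assumed to be defined elsewhere in the project):

import time
import telepot
from telepot.loop import MessageLoop

bot = telepot.Bot('123456:ABC-placeholder-token')  # placeholder token
MessageLoop(bot, respondToMsg).run_as_thread()     # dispatch incoming messages to the handler

while True:
    time.sleep(10)  # keep the main thread alive while the bot runs in the background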
Example #11
    bpy.context.scene.objects.link(obj)
    bpy.context.scene.update()

    # Create and assign materials to object
    for color in colors:
        mat = bpy.data.materials.new('Material')
        mat.diffuse_color = convert_hsv(color)
        mat.diffuse_intensity = 0.9
        obj.data.materials.append(mat)


if __name__ == '__main__':
    print(__file__)

    # Remove all elements
    utils.removeAll()

    # Create object
    voronoi_landscape()

    # Create camera and lamp
    target = utils.target((0, 0, 3))
    utils.camera((-8, -12, 11), target, type='ORTHO', ortho_scale=5)
    utils.lamp((10, -10, 10), target=target, type='SUN')

    # Enable ambient occlusion
    utils.setAmbientOcclusion(samples=10)

    # Render scene
    utils.renderToFolder('rendering', 'vornoi_landscape', 500, 500)
Example #12
    bm = bmesh.new()
    tetrahedronBasePoints = tetrahedronPoints(5)
    recursiveTetrahedron(bm, tetrahedronBasePoints, level=4)

    # Create obj and mesh from bmesh object
    me = bpy.data.meshes.new("TetrahedronMesh")
    bm.to_mesh(me)
    bm.free()
    obj = bpy.data.objects.new("Tetrahedron", me)
    bpy.context.scene.objects.link(obj)
    bpy.context.scene.update()


    # Create camera and lamp
    target = utils.target((0, 0, 1))
    utils.camera((-8, 10, 5), target, type='ORTHO', ortho_scale=10)
    utils.lamp((10, -10, 10), target=target, type='SUN')

    # Enable ambient occlusion
    utils.setAmbientOcclusion(samples=10)

    # Select colors
    palette = [(181,221,201), (218,122,61)]
    palette = [utils.colorRGB_256(color) for color in palette]  # Adjust color to Blender

    # Set background color of scene
    bpy.context.scene.world.horizon_color = palette[0]

    # Set material for object
    mat = utils.simpleMaterial(palette[1])
    obj.data.materials.append(mat)