def __init__(self, type, src, w=None, h=None):
        self.type = type  # 'img' or 'vid'
        if type == 'vid':
            self.w = w
            self.h = h

        self.src = src

        # HSV lower/upper bounds for the color detector; renamed so it does
        # not shadow the built-in range()
        hsv_range = ([0, 100, 100], [40, 255, 255])
        self.Detect = Detect(hsv_range)
def recognize(people_folder):
    try:
        people = os.listdir(people_folder)
    except OSError:
        print("No people in system")
        sys.exit()

    detector = Detect('haarcascade_frontalface_alt.xml')

    recognizer = cv2.face.LBPHFaceRecognizer_create()

    images = []
    labels = []
    labels_people = {}
    for i, person in enumerate(people):
        labels_people[i] = person
        for image in os.listdir(os.path.join(people_folder, person)):
            images.append(cv2.imread(os.path.join(people_folder, person, image), 0))
            labels.append(i)
    try:
        recognizer.train(images, np.array(labels))
    except cv2.error:
        print("Training failed; check that the images loaded correctly")
        sys.exit()

    video = Camera()
    getFace(video, detector, recognizer)
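
The getFace helper is not shown in this snippet. A minimal sketch of what it might look like, assuming the Camera source exposes read() and that the detector can return face boxes (detect_faces is a hypothetical method; recognizer.predict is the real cv2.face LBPH API):

def getFace(video, detector, recognizer):
    # Hypothetical loop; the Camera and detector APIs are assumptions.
    while True:
        frame = video.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in detector.detect_faces(gray):
            label, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('recognize', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break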
class TestDetection():
    def __init__(self, type, src, w=None, h=None):
        self.type = type  # 'img' or 'vid'
        if type == 'vid':
            self.w = w
            self.h = h

        self.src = src

        # HSV lower/upper bounds for the color detector; renamed so it does
        # not shadow the built-in range()
        hsv_range = ([0, 100, 100], [40, 255, 255])
        self.Detect = Detect(hsv_range)

    def detect(self, show=False):
        if self.type == 'img':
            self.detect_img(show)
        elif self.type == 'vid':
            self.detect_vid(show)

    def detect_img(self, show):
        frame = self.Detect.detect_frame(self.src)
        if show:
            cv2.imshow('frame', frame)
            cv2.waitKey(0)
        return frame

    def detect_vid(self, show):
        disp = Display(self.w * 2, self.h)
        print('Starting...')

        i = 0
        fps = FPS().start()
        while self.src.more():
            frame = self.src.read()
            if frame is None:
                break
            d_frame = self.Detect.detect_frame(frame)
            disp.paint(d_frame)
            fps.update()
            time.sleep(0.01)
            fps.stop()  # refresh the end timestamp so fps() reports a running average
            print('fps: %f at frame %i' % (fps.fps(), i))
            i += 1
        cv2.destroyAllWindows()
        self.src.stop()
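
A usage sketch for TestDetection; the file names are placeholders, and the video source is assumed to be an imutils FileVideoStream, whose more()/read()/stop() API matches the calls in detect_vid:

# Hypothetical usage of the class above.
from imutils.video import FileVideoStream

img = cv2.imread('sample.jpg')
TestDetection('img', img).detect(show=True)

stream = FileVideoStream('sample.mp4').start()
TestDetection('vid', stream, w=640, h=480).detect(show=True)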
Example #4
def foodSense():
    print('Starting Food Sense')

    # Begin initializing necessary components
    fb = Firebase()
    detect = Detect(fb)
    monitor = Monitoring(fb)
    scale = Scale()

    # Set scale calibration
    scale.setReferenceUnit(-25.725)
    scale.reset()
    scale.tare()

    ### START DEBUG ###
    ### END DEBUG ###

    ### MAIN LOOP ###
    while True:
        while monitor.powerOn:
            print('Power is on')
            time.sleep(1)
            
            while monitor.doorClosed():
                print('Door is closed')
                monitor.checkTemp()
                time.sleep(1)

                if monitor.doorOpen():
                    print('Door was opened')
                    monitor.startDoorTimer()
                    
                    while monitor.doorOpen():
                        print('Waiting for door to close')
                        monitor.checkDoorTimer()
                        monitor.checkTemp()
                        time.sleep(1)
                    else:  # while/else: runs once the door-open loop exits without a break
                        print('Door was closed')

                        scale.getWeight()
                        detect.getImage()
                        detect.detectItem()
                        detect.parseResponse(scale.weight)
                else:
                    pass
            else:
                print('Door must be closed on program startup')
        else:  # the power loop ended: AC power was lost
            monitor.powerSave()
Example #5
def filter(r,addr):
    """检测web攻击,返回检测结果"""
    #uri黑白名单检测
    uri = r.uri.split('?')[0]
    if WHITE_URI_SWITCH:
        if uri not in WHITE_URI_LIST:
            return {"status":True, "type":'not-white-uri'}
    if uri in BLACK_URI_LIST and not WHITE_URI_SWITCH:
        return {"status":True, "type":'in-black-uri'}
    
    # Rule matching
    det_data = Detect(r)
    result = det_data.run()

    # IP whitelist: allow the connection through
    if result["status"] and WHITE_IP_SWITCH:
        if addr[0] in WHITE_IP_LIST:
            return {"status":False}

    return result
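
filter() relies on module-level switches and lists that are not shown in this snippet. A plausible configuration block, with the names taken from the function and all values purely illustrative:

# Illustrative values only; the real lists come from the project's config.
WHITE_URI_SWITCH = False           # when True, only whitelisted URIs pass
WHITE_URI_LIST = ['/index', '/login']
BLACK_URI_LIST = ['/admin/backup']
WHITE_IP_SWITCH = True             # when True, whitelisted IPs bypass rule hits
WHITE_IP_LIST = ['127.0.0.1']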
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--coords',
                        type=str,
                        required=True,
                        nargs='*',
                        action='append')
    parser.add_argument('--type',
                        type=str,
                        required=True,
                        choices=["train", "detect"])
    parser.add_argument('--train_id', type=str, required=False)
    args = parser.parse_args()

    # The train_id variable is a hash of min_lat, min_lon, max_lat, max_lon.
    # It allows different training sets to be run and stored separately
    if args.train_id is None:
        if args.type == 'detect':
            logger.error(
                'train_id must be set to the ID printed out at the training stage'
            )
            sys.exit()
        hash_object = hashlib.md5(str(args.coords).encode('utf-8'))
        train_id = hash_object.hexdigest()
    else:
        train_id = args.train_id

    logger.info('Using training ID: %s' % train_id)

    initStorageManager(train_id)

    # Loop through each GPS coordinate set provided
    if args.type == 'train':
        train = Train()
        train.processTiles(args.coords)
    if args.type == 'detect':
        detect = Detect()
        detect.processTiles(args.coords)
Example #7
    def __init__(self, phase, cfg):
        super(SSD, self).__init__()

        self.phase = phase  # specify 'train' or 'inference'
        self.num_classes = cfg["num_classes"]  # number of classes = 21

        # Build the SSD network
        self.vgg = make_vgg()
        self.extras = make_extras()
        self.L2Norm = L2Norm()
        self.loc, self.conf = make_loc_conf(cfg["num_classes"],
                                            cfg["bbox_aspect_num"])

        # Create the default boxes (DBox)
        dbox = DBox(cfg)
        self.dbox_list = dbox.make_dbox_list()

        # At inference time, attach the Detect class
        if phase == 'inference':
            self.detect = Detect()
Example #8
def decode_numpy_array(data, d: Detect):
    arr = np.frombuffer(data, np.uint8)
    try:
        img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    except (cv2.error, UnicodeDecodeError) as e:
        print("wrong at img decoding!!!", lineno())
        return None  # img is undefined past this point

    global count_img
    try:
        img, label = d.detect(img)
    except (cv2.error, UnicodeDecodeError) as e:
        print("wrong at detection", lineno())
        return None  # label is undefined past this point

    # cv2.imshow("Camera", img)
    # if cv2.waitKey(1) & 0xff == ord('q'):
    #     return
    succ = cv2.imwrite(os.path.join("data", "{}.jpg".format(count_img)),
                       cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    print(succ)
    count_img += 1
    return label
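
A quick way to exercise decode_numpy_array locally, assuming an initialized Detect instance d and a sample image on disk (both assumptions); cv2.imencode produces the same JPEG byte stream the socket would deliver:

frame = cv2.imread('sample.jpg')
ok, buf = cv2.imencode('.jpg', frame)  # JPEG-encode the frame to raw bytes
if ok:
    label = decode_numpy_array(buf.tobytes(), d)
    print('detected label:', label)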
Example #9
def main(argv):
    # Prepare datasets
    base_dir = 'datasets'
    # Create directory to place clean datasets
    clean_dir = 'new_datasets'
    # For Face Detection we use caffemodel
    caffe_path = "res10_300x300_ssd_iter_140000.caffemodel"
    deploy_path = "deploy.prototxt.txt"
    # Input File
    test_input = ""

    # Build the TrainData object
    model = TrainData(base_dir, clean_dir, deploy_path, caffe_path)
    for action in argv:
        if action == "train":
            model.train_data_flow()
        elif action == "predict":
            face = FaceDetector(deploy_path, caffe_path)
            img = cv2.imread("./datasets/2103181003/2103181003 (2).jpeg")
            image = face.normalize_faces(img)
            print(model.predict(image[0]))
        else:
            Detect(base_dir, clean_dir, deploy_path, caffe_path).video_rec()
Example #10
def upload():
    print('Inside Upload')
    if 'INPUTS' not in os.listdir():
        os.mkdir('INPUTS')
        print('INPUTS folder created')
    if 'OUTPUTS' not in os.listdir():
        os.mkdir('OUTPUTS')
        print('OUTPUTS folder created')
    if request.method == 'POST':
        print('Inside Post')

        payload = request.get_json()
        try:
            imgstring = payload['image']
            imgdata = base64.b64decode(imgstring)
            filename = './INPUTS/image.jpg'
            with open(filename, 'wb') as f:
                f.write(imgdata)
            Detect()
            one, two, five, ten = parsefromtext()
            total = (one * 1) + (two * 2) + (five * 5) + (ten * 10)
            imgstring = encode_image()
            return jsonify({
                "preds": "ok",
                "results": {
                    "one": one,
                    "two": two,
                    "five": five,
                    "ten": ten,
                    "total": total
                },
                "image_en": imgstring
            })
        except Exception:
            return Response('Request format invalid.', status=400)
    return Response('Request format invalid', status=400)
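
A minimal client for this handler, assuming the route is served at http://localhost:5000/upload (URL and file name are assumptions; the JSON shape matches the code above):

import base64
import requests

with open('note.jpg', 'rb') as img:
    payload = {'image': base64.b64encode(img.read()).decode('ascii')}
resp = requests.post('http://localhost:5000/upload', json=payload)
print(resp.json()['results'])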
Example #11
import argparse
import configparser
import time
from queue import Queue

# Capture and Detect are this project's worker threads; import paths assumed.
from capture import Capture
from detect import Detect

parser = argparse.ArgumentParser()
parser.add_argument("--config", help="configuration file")
parser.add_argument("-d", "--debug", help="enable debug mode", action="store_true")
arguments = parser.parse_args()
if arguments.config is None:
  config_file = 'rpi-art.conf'
else:
  config_file = arguments.config
# ConfigParser defaults must be strings
config_defaults = { 'width': '640', 'height': '480', 'fps': '24', 'slices': '8', 'debug': 'False' }
config = configparser.ConfigParser(config_defaults)
config.read(config_file)
if arguments.debug:
  config.set('rpi-art', 'debug', 'True')

capture_queue = Queue()
capture_thread = Capture(1, "Capture", capture_queue, config)
detect_thread = Detect(2, "Detect", capture_queue, config)
capture_thread.start()
detect_thread.start()

try:
  while True:
    if config.getboolean('rpi-art', 'debug'):
      print "Queue length", capture_queue.qsize()
    time.sleep(1)
except KeyboardInterrupt:
  capture_thread.exit.set()
  detect_thread.exit.set()
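
The script expects an INI file with an [rpi-art] section; only the section name and option names below come from the code above, the values are illustrative. A sketch that generates a matching rpi-art.conf:

import configparser

cfg = configparser.ConfigParser()
cfg['rpi-art'] = {'width': '640', 'height': '480', 'fps': '24',
                  'slices': '8', 'debug': 'False'}
with open('rpi-art.conf', 'w') as fh:
    cfg.write(fh)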
Example #12
        img, label = d.detect(img)
    except (cv2.error, UnicodeDecodeError) as e:
        print("wrong at detection", lineno())

    # cv2.imshow("Camera", img)
    # if cv2.waitKey(1) & 0xff == ord('q'):
    #     return
    succ = cv2.imwrite(os.path.join("data", "{}.jpg".format(count_img)),
                       cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    print(succ)
    count_img += 1
    return label


# prepare opencv and GPU
with Detect() as d:
    os.system("rm -r data; mkdir data")
    os.system("rm -r distance; mkdir distance")
    TCP_IP = '10.42.0.1'
    # TCP_IP = '127.0.0.1'
    TCP_PORT = 5006
    TCP_BUFFER_SIZE = 655535

    UDP_IP = '10.42.0.1'
    UDP_PORT = 5005
    UDP_BUFFER_SIZE = 1024

    ###### initialize TCP server
    s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s1.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s1.bind((TCP_IP, TCP_PORT))
Example #13
num_layer = 2
num_units = 512
input_size = 32
batch_size = 10

g1 = tf.Graph()
g2 = tf.Graph()
sess1 = tf.Session(graph=g1)
sess2 = tf.Session(graph=g2)

with g1.as_default():
    recognizer = Recognizer(num_layer, num_units, input_size, batch_size,
                            sess1)

with g2.as_default():
    detector = Detect(sess2)


def run(img_path):
    boxs, img_box = detector.ctpn(img_path)
    img = cv2.imread(img_path)

    boxs = sorted(boxs, key=lambda x: x[1])

    imgs = []
    for b in boxs:
        x1, y1, x2, y2 = b
        imgs.append(img[y1:y2, x1:x2])

    img_num = len(imgs)
    while len(imgs) < batch_size:
Example #14
 def __init__(self, device):
     super(Process, self).__init__()
     self.detect = Detect(device)
     self.ocr = Ocr(device)
Example #15
from flask import Flask, render_template, request
from werkzeug.utils import secure_filename
import os
from ocr import Ocr
from detect import Detect
import time
import pytesseract
import cv2
from pdf2image import convert_from_path
from datetime import datetime

app = Flask(__name__)
ocr = Ocr()
detect = Detect()


@app.route('/hello', methods=['GET'])
def get():
    return 'hello i am dung'


@app.route('/process', methods=['POST'])
def processUrl():

    log = open("log.txt", "a")
    result = []
    if request.method == 'POST':
        for e in request.files:
            try:
                f = request.files[e]
                timestamp = datetime.now().timestamp()
Example #16
def main(args):
    # The image source may differ from the default depending on the command-line arguments
    source = None
    try:
        if args.picamera:
            source = capture.picamera()
            LOG.info('Will grab frames from the picamera.')
        elif args.image is not None:
            file = args.image
            source = capture.image_file(file)
            LOG.info('Will grab frames from a static image: {}.'.format(file))
        elif args.ipcamera is not None:
            url = 'http://{}/mjpg/video.mjpg'.format(args.ipcamera)
            source = capture.ip_camera(url)
            LOG.info('Will grab frames from IP camera at {}'.format(url))
        else:
            index = args.webcam
            source = capture.webcam(index)
            LOG.info('Will grab frames from webcam #{}.'.format(index))
    except (capture.CameraInitializationError, FileNotFoundError) as e:
        # TODO Do something meaningful and retry
        LOG.error(e)
        exit('Unable to read from image source.')

    # Initialize NetworkTables one time at the start; other code assumes it is initialized.
    network.init_network_tables(args.ip)

    # NetworkTables communication. The script aims to be interchangeable with GRIP.
    network_publisher = NetworkTablePublisher('GRIP/myContoursReport', silent=args.nt_silent)

    # Debug communication
    detect_receiver = None  # TODO
    detect_publisher = NetworkTablePublisher('Grapple', silent=args.nt_silent)
    detect = Detect(detect_receiver, detect_publisher)

    # For GUI debugging
    display = DisplayObserver()

    for image in source:
        targets = detect.get_data(image)

        # Adapt the payload into the same format as GRIP to be compatible with
        # the current code on the RIO
        # TODO this is probably why the zip function is a thing
        areas, centers_x, centers_y = [], [], []
        for area, (center_x, center_y) in targets:
            areas.append(area)
            centers_x.append(center_x)
            centers_y.append(center_y)
        network_publisher.on_next(('area', areas))
        network_publisher.on_next(('centerX', centers_x))
        network_publisher.on_next(('centerY', centers_y))

        if not args.no_gui:
            # Overlay the contours onto the original image and display it
            centers = list(zip(centers_x, centers_y))
            display.on_next(detect.to_image(centers))

    # All done, manually evoke cleanup callbacks
    network_publisher.on_completed()
    display.on_completed()
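
In the spirit of the TODO note above, the adapter loop can indeed be collapsed with zip; a sketch (zip(*...) raises on an empty targets list, hence the guard):

if targets:
    areas, centers = zip(*targets)
    centers_x, centers_y = zip(*centers)
else:
    areas, centers_x, centers_y = [], [], []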
Example #17
        video = cv2.VideoCapture(filename)
        count = originalCount = 0
        all_images = []
        imlist = []
        result = pd.DataFrame([])
        increment = 0
        while True:
            count += 1
            originalCount += 1
            ret, frame = video.read()
            if ret:
                all_images.append(frame)
                imlist.append('{}.jpg'.format(originalCount))
            else:
                if (len(imlist) == len(all_images)) and len(imlist) != 0:
                    result_ = Detect(all_images, imlist, model)
                    result_ = list(result_)
                    temp_df = pd.DataFrame(result_)
                    temp_df = temp_df.reset_index(drop=True)
                    temp_df[0] = pd.to_numeric(temp_df[0])
                    temp_df[0] = temp_df[0].apply(
                        lambda x: x + increment * BatchSize)
                    all_images = []
                    imlist = []
                    count = 0
                    increment += 1
                    frames = [result, temp_df]
                    result = pd.concat(frames)

                break
Example #18
    def run(self):
        # Set up LED pin
        LED = 27
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(LED, GPIO.OUT)

        # Initialize objects
        f = Firebase(self.q)
        d = Detect(f, self.q)
        m = Monitoring(f, self.q)
        s = Scale(self.q)

        # Set scale calibration
        s.setReferenceUnit(-25.725)
        s.reset()
        s.tare()

        self.q.put('Ready')

        # Loop until stop event is set
        while not self.event.is_set():

            # Loop while RPi is on AC power
            while m.powerOn:

                # Check if stop has been set
                if self.event.is_set():
                    break

                # Loop while fridge door is closed
                while m.doorClosed():
                    if not m.powerOn:
                        break
                    m.checkTemp()

                    # Check if stop has been set
                    if self.event.is_set():
                        break
                else:  # door-closed loop exited without break: the door opened
                    print('Door was opened')
                    self.q.put('Door opened')
                    GPIO.output(LED, True)
                    m.startDoorTimer()

                    while m.doorOpen():
                        print('Waiting for door to close')

                        m.checkDoorTimer()
                        m.checkTemp()
                    else:  # door-open loop finished: the door closed
                        print('Door closed')
                        self.q.put('Door closed')

                        s.getWeight()
                        d.getImage()
                        d.detectItem()
                        d.parseResponse(s.weight)
                        s.tare()
                        GPIO.output(LED, False)

                        print('Done')
                        self.q.put('Done')
            else:
                m.powerSave()

        f.close()  # Firebase app must be closed before we can create another instance
Example #19
        w_last_week1.extend(w_last_week2)
        dataA = to_str(w_now)
        dataB = to_str(w_yest1)
        dataC = to_str(w_last_week1)
        d = {
            "viewId": "2012",
            "viewName": "登陆功能",
            "attrId": "19201",
            "attrName": "ptlogin登陆请求总量",
            "window": w,
            "time": "2018-10-17 17:28:00",
        }
        d["dataC"] = dataC
        d["dataB"] = dataB
        d["dataA"] = dataA

        detector = Detect()
        TSD_OP_SUCCESS, ret_data = detector.value_predict(d)
        if ret_data['ret'] > 0.5:  # normal
            y.append(0)
        else:
            y.append(1)

    precision, recall, f1 = evalute_delay(test['label'], y, delay=7)
    log.append([num, precision, recall, f1])
    log = pd.DataFrame(log, columns=['kpi id', 'precision', 'recall', 'fscore'])
    log.to_csv(num + 'metis.csv')
    del train
    del test
    del t
Example #20
 def test_long_url(self):
     expected = ['www.google.com/telephone/wire',
                 'http://pbreadinglist.herokuapp.com/books/TvEqDAAAQBAJ#.XVOriU5z2tA.twitter',
                 'www.pip.org']
     self.assertEqual(Detect(long_url).url, expected)
Example #21
connection = Motion(IP, PORT, tries, interval)
motion = MotionDetect(tries, min_final_move, max_final_move)


while connection.isAppWorking():
    data = connection.motionData()
    isMotionDetected = motion.isMotionDetect(data)
    print(isMotionDetected)
    if isMotionDetected:
        imgSource = connection.savePicture()
        face = Detect(imgSource, scale_factor)
        today = datetime.date.today().strftime("%d.%m.%y")
        if not os.path.isdir("./faces"):
            connection._createFolder("./faces")
        if not os.path.isdir("./faces/" + today):
            connection._createFolder("./faces/" + today)

        # Call face_detect() once and reuse the result
        faces = face.face_detect()
        if faces is not None and len(faces):
            face.facecrop(faces)


Example #22
 def test_short_url(self):
     expected = ['google.com', 'twitter.com', 'facebook.com', "pybites.com"]
     self.assertEqual(Detect(short_url).url, expected)
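
The two tests above suggest that this Detect pulls URLs out of free text into a .url list. A minimal regex-based sketch of such a class; the pattern is an assumption and the real implementation may differ:

import re

class Detect:
    # Hypothetical pattern: optional scheme, optional www, domain, optional path.
    URL_RE = re.compile(r"(?:https?://)?(?:www\.)?[\w-]+(?:\.[\w-]+)+(?:/\S*)?")

    def __init__(self, text):
        self.url = self.URL_RE.findall(text)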
Example #23
def begin():
    global is_teacher
    global username
    global password
    global classroomId
    is_connected = False

    ask_is_teacher = raw_input("Enter 'T' for teacher.")
    if ask_is_teacher == "T":
        is_teacher = True
        print("Welcome, teacher")
    else:
        print("Welcome, student")
        is_teacher = False

    username = raw_input("Please enter your username: "******"Please enter your password: "******"Working..")
    if not is_teacher:
        classroomId = raw_input("Please enter the classroom code: ")
        connection = Database(username)
        isvalid = connection.create_new_userconnection(username, classroomId,
                                                       0)

        if not isvalid:
            print("invalid response")
            # keep asking until a valid classroom code is entered
            while not isvalid:
                classroomId = raw_input("Please enter the classroom code: ")
                connection = Database(username)
                isvalid = connection.create_new_userconnection(
                    username, classroomId, 0)

        is_connected = True
        face_detect = Detect(username)
        face_detect.scan_image(connection)  #run this loop

        def active_loop(threadname, delay):
            while is_connected:
                print("detected face: " + str(face_detect.isActive))
                connection.active_status(face_detect.isActive)
                time.sleep(delay)

        thread.start_new_thread(active_loop, (
            "Active-Thread",
            5,
        ))
        if raw_input("Enter 'disconnect' to end session"):
            print("disconnected")
            connection.leave_classroom(username)
            face_detect.connected = False
    else:
        classroomId = raw_input("Please enter a classroom code: ")
        connection = Database(username)
        connection.create_new_userconnection(username, classroomId, 1)
        is_connected = True

        def update_loop(threadName, delay):
            while is_connected:
                connection.get_classroom_data(classroomId)
                time.sleep(delay)

            print("loop closed")

        thread.start_new_thread(update_loop, (
            "Thread-1",
            1,
        ))

        if raw_input("Enter 'disconnect' to end session"):
            is_connected = False
            print("disconnected")
            connection.end_classroom(classroomId)
Example #24
def DetectFace(img_path):
    face_token = Detect(img_path)
    print(face_token)
    return face_token
Example #25
parser.add_argument("--crawl",
                    nargs="?",
                    const=True,
                    help="Crawling recent crash dumps.")
parser.add_argument("--train",
                    nargs="?",
                    const=True,
                    help="Training for parameter tuning.")
parser.add_argument("--stop",
                    nargs="?",
                    const=True,
                    help="Count file names that can be filtered.")
parser.add_argument("--detect", nargs=2, help="Detect crash dump similarity.")
args = parser.parse_args()
# suppress warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

if __name__ == "__main__":
    # crawling recent crash dumps
    if args.crawl:
        ETL().load()
    # training for parameter tuning
    if args.train:
        Train().training()
    # count file names that can be filtered
    if args.stop:
        StopWord().count_word()
    # detect crash dump similarity
    if args.detect:
        Detect(args.detect).detect_sim()
Example #26
        referer = str(request.referer)
    except Exception as e:
        insertLog(e)

    try:
        language = str(request.accept_language)
    except Exception as e:
        insertLog(e)

    try:
        agent = str(request.user_agent).lower()
    except Exception as e:
        insertLog(e)

    try:
        detect = Detect(agent)
    except Exception as e:
        insertLog(e)
        return

    try:
        weblog = WebLog()
        weblog.browser = detect.browser
        weblog.browser_ver = detect.browser_version
        weblog.os = detect.os
        weblog.os_ver = detect.os_version
        weblog.device = detect.device
        weblog.referer = referer
        weblog.language = language
        weblog.agent = agent
        weblog.put()
Example #27
import rospy
from movement import Movement
from detect import Detect
from memap import MemoryMap

CYCLEDURATION = 0.2  # replace with a better-grounded value later


def cycle():

    try:

        while not rospy.is_shutdown():

            detect.update()
            movement.update()
            rospy.sleep(CYCLEDURATION)
            movement.stop()

    except rospy.ROSInterruptException:
        print("Ocorreu uma exceção com o rospy")


if __name__ == "__main__":

    rospy.init_node("rbpu_main")
    memap = MemoryMap()
    movement = Movement(memap)
    detect = Detect(memap)

    cycle()
Example #28
    with open(name + '.json', 'rb') as f:
        return json.load(f)


def dump(dictionary):
    print(json.dumps(dictionary, indent=4))


# Path to dataset

path_infiles = './data/CoMoFoD_small/'

in_list = listdir(path_infiles)
current_time = time()
KP = Keypoint()
DET = Detect()
result = {}

for file in in_list:
    filename, extension = path.splitext(path.basename(file))

    # create a new dictionary for each filename, add keys:
    #   'path', value: the path to the file
    #   'filename', value: the filename without the extension
    #   'extension', value: the ending of the file

    # technically the original file name without its classifier suffixes
    fname = re.search(r'^\d+_[FO]', filename).group(0)

    if fname not in result:
        result[fname] = {}