Example #1
File: server.py Project: zhijiahu/pibots
async def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c",
                    "--conf",
                    required=True,
                    help="Path to the input configuration file")
    args = vars(ap.parse_args())

    # load the configuration file and label encoder
    conf = Conf(args["conf"])

    await asyncio.gather(read_frame(), process_frame(conf))

    cv2.destroyAllWindows()
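
Each example on this page constructs a Conf object from a configuration file path; most then index it like a dictionary. The exact class differs per project, but a minimal sketch of the JSON-backed variant most of these snippets assume could look like this (illustrative only, not any project's actual code):

# Minimal JSON-backed Conf sketch (illustrative; each project ships its own
# implementation under a project-specific module)
import json


class Conf:
    def __init__(self, conf_path):
        # read the JSON configuration file into an internal dictionary
        with open(conf_path) as f:
            self.conf = json.load(f)

    def __getitem__(self, key):
        # allow dictionary-style lookups, e.g. conf["model_path"]
        return self.conf.get(key)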
Example #2

# imports for this snippet; HOG, ObjectDetector and Conf come from
# project-specific modules that are not shown here
import argparse
import imutils
import pickle
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--conf",
                required=True,
                help="path to the configuration file")
ap.add_argument("-i",
                "--image",
                required=True,
                help="path to the image to be classified")
args = vars(ap.parse_args())

# load the configuration file
conf = Conf(args["conf"])

# load the classifier, then initialize the Histogram of Oriented Gradients descriptor
# and the object detector
model = pickle.loads(open(conf["classifier_path"], "rb").read())
hog = HOG(orientations=conf["orientations"],
          pixelsPerCell=tuple(conf["pixels_per_cell"]),
          cellsPerBlock=tuple(conf["cells_per_block"]),
          normalize=conf["normalize"],
          block_norm="L1")
od = ObjectDetector(model, hog)

# load the image and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=min(260, image.shape[1]))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
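
The keys accessed in Example #2 imply a configuration file shaped roughly as follows; the values below are made-up placeholders, not taken from the project:

# Illustrative shape of the JSON file passed via --conf in Example #2
# (key names come from the code above; values are placeholders)
EXAMPLE_CONF = {
    "classifier_path": "output/model.cpickle",
    "orientations": 9,
    "pixels_per_cell": [4, 4],
    "cells_per_block": [2, 2],
    "normalize": True
}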
Example #3
from utils import Conf
from imutils import paths
import face_recognition
import argparse
import pickle
import cv2
import os
config_file = "/home/pi/Desktop/Attendance Management System/config/config.json"
conf = Conf(config_file)

print("[INFO] quantifying faces...")
imagePaths = list(
    paths.list_images(os.path.join(conf["dataset_path"], conf["class"])))
knownEncodings = []
knownNames = []
for (i, imagePath) in enumerate(imagePaths):
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    print(imagePath)
    name = imagePath.split(os.path.sep)[-2]
    print(name)
    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    encodings = face_recognition.face_encodings(rgb)

    for encoding in encodings:
        knownEncodings.append(encoding)
        knownNames.append(name)
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open(conf["encodings_path"], "wb")
f.write(pickle.dumps(data))
f.close()
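
Once the encodings are serialized, a companion recognition script typically reloads the pickle and compares a new face against it. A hedged sketch using the public face_recognition API (the query image path and the matching strategy are assumptions, not code from this project):

# Hedged sketch: reload the pickled encodings and match one query image
# ("query.jpg" is a placeholder; conf is the Conf object loaded above)
import pickle

import cv2
import face_recognition

data = pickle.loads(open(conf["encodings_path"], "rb").read())

query = cv2.imread("query.jpg")
rgb = cv2.cvtColor(query, cv2.COLOR_BGR2RGB)

boxes = face_recognition.face_locations(rgb)
encodings = face_recognition.face_encodings(rgb, boxes)

for encoding in encodings:
    matches = face_recognition.compare_faces(data["encodings"], encoding)
    name = "Unknown"
    if True in matches:
        # simplest strategy: take the first matching known name
        name = data["names"][matches.index(True)]
    print("[INFO] recognized: {}".format(name))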
Example #4

                time.sleep(5)

        # Processes results
        if status >= 400:
            light.info(
                "[ERROR] Could not send data after 5 attempts, please check \
                your token credentials and internet connection")
            return False

        # light.info("[INFO] request made properly, your device is updated")
        return True

    def send_action(self, trackingId, actionName):
        light.info("send trackingId {} {} ".format(trackingId, actionName))
        thread = threading.Thread(target=self._send_action,
                                  args=(
                                      trackingId,
                                      actionName,
                                  ))
        thread.daemon = True
        thread.start()


if __name__ == '__main__':

    config_server = '../jetson/configs.json'
    conf = Conf(config_server)

    light_api = ApiLight(conf)

    light_api.send_action(123, "out")
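
Example #4 is cut off mid-method: the visible fragment (time.sleep(5), the status >= 400 check, the "5 attempts" error message) suggests a bounded retry loop around an HTTP request inside _send_action. A hypothetical reconstruction of that pattern follows; the requests library, URL, headers, and payload are assumptions, and only the retry shape comes from the fragment:

# Hypothetical retry loop matching the truncated fragment above
# (library, URL, headers and payload are assumed, not the project's code)
import time

import requests


def post_with_retries(url, payload, headers, attempts=5):
    status = None
    for _ in range(attempts):
        try:
            response = requests.post(url, json=payload, headers=headers)
            status = response.status_code
            if status < 400:
                break
        except requests.RequestException:
            status = 500
        # wait before the next attempt, as in the fragment above
        time.sleep(5)
    return status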
Example #5
                help="Path to input video")
ap.add_argument("-c","--config", default="./config.json",
               help="Path to the input configuration file")
ap.add_argument("-s","--save",default=False,
                help="Save processed video (True/False)")
args = vars(ap.parse_args())


# initialize the list of class labels MobileNet SSD was trained to detect
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]

# load the configuration file
conf = Conf(args["config"])

# load our serialized model from disk
def load_model():
    print("[INFO] Loading model...")
    
    if os.path.isfile(conf["prototxt_path"]) and os.path.isfile(conf["model_path"]): 
#        net = cv2.dnn.readNetFromCaffe("./MobileNetSSD/MobileNetSSD_deploy.prototxt",
#                                       "./MobileNetSSD/MobileNetSSD_deploy.caffemodel")
        net = cv2.dnn.readNetFromCaffe(conf["prototxt_path"],conf["model_path"])
        print("[INFO] Loaded model successfully...")
        return net
    else:
        print("Model is not found...")
            
# main function 
def run_core(conf):
    # bait

    global lock, frame_out

    detector_lock = threading.Lock()

    logruncv.info('Get camera List to run')

    # cam_cfgs = get_config_file()

    # camera_infor = Conf(conf['camera_info'])
    camera_infor = Conf(conf['camera_info'])
    zone_infor = Conf(conf['zone_info'])

    if camera_infor['camera_link'] is None or camera_infor[
            'camera_name'] is None or camera_infor['camera_id'] is None:
        raise ValueError("camera_infor lacks required information")

    if zone_infor['1'] is None or zone_infor['2'] is None or zone_infor[
            '3'] is None:
        raise "zone_infor slack information"

    cam_cfgs = {}

    cam_cfgs['cam_Link'] = camera_infor['camera_link']
    cam_cfgs['step'] = 1

    cam_cfgs['cam_Name'] = camera_infor['camera_name']
    cam_cfgs['cam_ID'] = camera_infor['camera_id']

    cam_cfgs['funcs'] = []

    zone_fillter = {}
    counter_infor = {}

    zone_fillter["func_ID"] = 5
    zone_fillter["func_Name"] = "zone filtter"
    zone_fillter["func_note"] = decode_note_polygon(zone_infor['1'])

    counter_infor["func_ID"] = 1
    counter_infor["func_Name"] = "counter"
    counter_infor["func_line"] = decode_note_counter(zone_infor['2'])
    counter_infor["func_pont"] = decode_note_counter(zone_infor['3'])

    cam_cfgs['funcs'].append(zone_fillter)
    cam_cfgs['funcs'].append(counter_infor)

    logruncv.info(cam_cfgs)

    # start 1 thread for 1 camera
    thread = threading.Thread(target=core_thread,
                              args=(conf, cam_cfgs, detector_lock, frame_out))
    thread.daemon = True
    thread.start()
    # start the flask app
    app.run(host='0.0.0.0',
            port=8888,
            debug=False,
            threaded=True,
            use_reloader=False)
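
run_core shares the frame_out and lock globals with the Flask app, which usually implies an MJPEG streaming route elsewhere in the file. A sketch of that common pattern (the route name and generator are assumptions, not this project's code):

# Hedged sketch of a Flask MJPEG streaming route built on frame_out and lock
# (route name and details are assumed; app, lock and frame_out come from the
# surrounding module)
from flask import Response


def generate():
    while True:
        with lock:
            if frame_out is None:
                continue
            # encode the most recent processed frame as JPEG
            (ok, encoded) = cv2.imencode(".jpg", frame_out)
        if not ok:
            continue
        yield (b"--frame\r\n"
               b"Content-Type: image/jpeg\r\n\r\n" +
               bytearray(encoded) + b"\r\n")


@app.route("/video_feed")
def video_feed():
    # stream the MJPEG frames to the client
    return Response(generate(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")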
Example #7
File: GoldQuest.py Project: ollej/Twippy
    def preloop(self):
        cfg = Conf('../config.ini', 'LOCAL')
        self.game = GoldQuest(cfg)
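
Unlike the JSON-based examples above, this project's Conf is constructed from an INI file path plus a section name ('LOCAL'). A minimal wrapper with that call signature could be built on the standard library configparser (illustrative only, not the actual Twippy implementation):

# Illustrative INI-backed Conf matching the Conf('../config.ini', 'LOCAL') call
# (not the actual Twippy implementation)
import configparser


class Conf:
    def __init__(self, path, section):
        parser = configparser.ConfigParser()
        parser.read(path)
        # keep only the requested section as a plain dictionary
        self.conf = dict(parser[section])

    def __getitem__(self, key):
        return self.conf.get(key)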