Example #1
    def __init__(self, path, batchSize=4, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        # self.det_model.cuda()
        self.det_model.eval()

        self.stream = cv2.VideoCapture(path)
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        self.batchSize = batchSize
        self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
        leftover = 0
        if self.datalen % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)
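The leftover/num_batches bookkeeping above is just a ceiling division: a trailing partial batch still counts as a full batch. A minimal standalone sketch of the same arithmetic (the helper name is hypothetical):

import math

def count_batches(datalen, batch_size):
    # Same result as: datalen // batch_size + (1 if datalen % batch_size else 0)
    return math.ceil(datalen / batch_size)

assert count_batches(10, 4) == 3  # two full batches plus a partial third
assert count_batches(8, 4) == 2   # exact multiple, no leftover batch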
Example #2
        os.mkdir(args.outputpath)

    # Load input video
    fvs = WebcamLoader(webcam).start()
    (fourcc, fps, frameSize) = fvs.videoinfo()
    # Data writer
    save_path = os.path.join(args.outputpath,
                             'AlphaPose_webcam' + webcam + '.avi')
    writer = DataWriter(args.save_video, save_path,
                        cv2.VideoWriter_fourcc(*'XVID'), fps,
                        frameSize).start()

    # Load YOLO model
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_model = Darknet("yolo/cfg/yolov3.cfg")
    det_model.load_weights('models/yolo/yolov3.weights')
    det_model.net_info['height'] = args.inp_dim
    det_inp_dim = int(det_model.net_info['height'])
    assert det_inp_dim % 32 == 0
    assert det_inp_dim > 32
    det_model.cuda()
    det_model.eval()

    # Load pose model
    pose_dataset = Mscoco()
    if args.fast_inference:
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
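DataWriter's internals are not shown in this snippet; as a rough stand-in, the same fourcc/fps/frameSize triple reported by videoinfo() maps onto a plain cv2.VideoWriter. The file name, frame rate and frame size below are placeholder values, not taken from the example:

import cv2
import numpy as np

# Hypothetical stand-in for what DataWriter presumably wraps: an ordinary
# OpenCV writer fed with the codec, frame rate and frame size of the source.
fps, frame_size = 25, (640, 480)
out = cv2.VideoWriter('AlphaPose_webcam0.avi',
                      cv2.VideoWriter_fourcc(*'XVID'), fps, frame_size)
out.write(np.zeros((480, 640, 3), dtype=np.uint8))  # one BGR frame per call
out.release()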
Example #3
def read_model():
    model = Darknet('yolo/cfg/yolov3.cfg')
    model.load_weights('yolo/yolov3.weights')
    return model
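A typical call site for read_model(), mirroring the setup steps used in the other examples here; the input resolution of 416 is an assumption, not part of this example:

import torch

model = read_model()
model.net_info["height"] = "416"   # assumed resolution; other examples take it from args
inp_dim = int(model.net_info["height"])
model.eval()                       # inference only
if torch.cuda.is_available():
    model = model.cuda()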
Example #4
def main():
    global args
    args = parser.parse_args()

    # Yolo
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0
    CUDA = torch.cuda.is_available()

    num_classes = 80
    bbox_attrs = 5 + num_classes

    model = Darknet(args.config_file)
    model.load_weights(args.weights_file)

    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])

    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        model.cuda()

    model.eval()

    # Connect
    client = paho.Client()
    host, port = args.broker_url.split(':')
    client.connect(host, int(port))

    # subscribe to system ($SYS) messages
    client.message_callback_add("$SYS/#", system_message)
    client.subscribe("$SYS/#")

    # Open rtsp stream
    cap = cv2.VideoCapture(args.input_url)

    assert cap.isOpened(), 'Cannot capture source {}'.format(args.input_url)

    # Inspect input stream
    input_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    input_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    input_fps = cap.get(cv2.CAP_PROP_FPS)
    print("[input stream] width: {}, height: {}, fps: {}".format(
        input_width, input_height, input_fps))

    # Open output stream
    output_command = stream_factory(args.output_url, input_width, input_height,
                                    input_fps)
    print(output_command)
    output_stream = sp.Popen(output_command, stdin=sp.PIPE, stderr=sp.PIPE)

    frames = 0
    start = time.time()

    while cap.isOpened():
        ret, frame = cap.read()  # frame size: 640x360x3(=691200)
        if ret:
            # Our detection operations on the frame come here
            img, orig_im, dim = prep_image(frame, inp_dim)
            # prep_image returns the original frame size as a plain tuple;
            # turn it into a tensor before any GPU transfer
            im_dim = torch.FloatTensor(dim).repeat(1, 2)

            if CUDA:
                im_dim = im_dim.cuda()
                img = img.cuda()

            output = model(Variable(img), CUDA)
            output = write_results(output,
                                   confidence,
                                   num_classes,
                                   nms=True,
                                   nms_conf=nms_thesh)

            # write_results returns an int when nothing is detected in the frame
            if isinstance(output, int):
                frames += 1
                print("FPS of the video is {:5.2f}".format(
                    frames / (time.time() - start)))
                cv2.imshow("frame", orig_im)
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue

            output[:, 1:5] = torch.clamp(output[:, 1:5], 0.0,
                                         float(inp_dim)) / inp_dim

            output[:, [1, 3]] *= frame.shape[1]
            output[:, [2, 4]] *= frame.shape[0]

            # class names and box colours (loading these once, before the
            # loop, would avoid re-reading the files on every frame)
            classes = load_classes('yolo/data/coco.names')
            colors = pkl.load(open("yolo/pallete", "rb"))

            # Overlay on screen
            list(map(lambda x: write(x, orig_im, classes, colors), output))
            # Send the bounding boxes over MQTT
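            # One possible implementation of the step above (a sketch, not
            # part of the original example): publish each detection as JSON
            # over the MQTT client connected earlier. The topic name is an
            # assumption and `json` is assumed to be imported at the top of
            # the script. Each row of `output` is
            # [batch_idx, x1, y1, x2, y2, objectness, class_score, class_idx].
            for det in output:
                client.publish("yolo/detections",
                               json.dumps({"class": classes[int(det[-1])],
                                           "box": [float(v) for v in det[1:5]]}))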

            # Display the resulting frame
            cv2.imshow("frame", orig_im)
            frames += 1
            print("FPS of the video is {:5.2f}, size: {}".format(
                frames / (time.time() - start), orig_im.size))

            # Write rtmp stream
            output_stream.stdin.write(frame.tobytes())  # tobytes(): tostring() is deprecated
        else:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Close
    cap.release()
    cv2.destroyAllWindows()
    client.disconnect()
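stream_factory() is not shown in this example; a plausible sketch, assuming it builds an ffmpeg command line that reads the raw BGR frames written to output_stream.stdin and restreams them to the output URL (every flag here is an assumption, not the example's actual command):

def stream_factory(output_url, width, height, fps):
    # Hypothetical reconstruction: ffmpeg consumes raw bgr24 frames from
    # stdin (what output_stream.stdin.write(frame.tobytes()) sends) and
    # re-encodes them as H.264 over FLV/RTMP to output_url.
    return ['ffmpeg', '-y',
            '-f', 'rawvideo', '-pix_fmt', 'bgr24',
            '-s', '{}x{}'.format(int(width), int(height)),
            '-r', str(fps),
            '-i', '-',
            '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
            '-f', 'flv', output_url]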
Example #5
        help="Input resolution of the network. "
        "Increase to increase accuracy. Decrease to increase speed",
        default="416",
        type=str)
    return parser.parse_args()


cap = cv2.VideoCapture(
    '/home/gaurav/Desktop/sem6/VR/before_midsem/mini_project/harsh_without_mask.mp4'
)
args = arg_parse()
confidence = float(args.confidence)
nms_thesh = float(args.nms_thresh)
num_classes = 80
bbox_attrs = 5 + num_classes
yolo = Darknet(args.cfgfile)
yolo.load_weights(args.weightsfile)
yolo.net_info["height"] = args.reso
inp_dim = int(yolo.net_info["height"])
assert inp_dim % 32 == 0
assert inp_dim > 32
yolo.cuda()

PATH = '/home/gaurav/Desktop/sem6/VR/before_midsem/mini_project/model/classi4.pkl'
net = models.alexnet(pretrained=True)
my_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(my_device)

loaded_model = pickle.load(open(PATH, 'rb'))
tot_cnt = 0
cnt = 0
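The snippet ends before the processing loop; a hedged continuation that reuses the globals defined above and follows the prep_image/write_results pattern of Example #4. The meaning of cnt and tot_cnt is assumed (frames with detections vs. frames read), and the crop-and-classify step with loaded_model is omitted because its interface is not shown:

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    tot_cnt += 1
    img, orig_im, dim = prep_image(frame, inp_dim)
    output = yolo(Variable(img.cuda()), True)
    dets = write_results(output, confidence, num_classes,
                         nms=True, nms_conf=nms_thesh)
    if isinstance(dets, int):   # an int return means nothing was detected
        continue
    cnt += 1                    # frames with at least one detection
cap.release()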
Example #6
if __name__ == '__main__':
    args = arg_parse()
    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    start = 0
    CUDA = torch.cuda.is_available()
    classes = load_classes('data/coco.names')
    colors = load_colors('data/pallete')

    num_classes = 80
    bbox_attrs = 5 + num_classes

    model = Darknet(args.cfgfile, height=args.reso)
    model.load_state_dict(torch.load(args.weightsfile))

    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])

    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if CUDA:
        model.cuda()

    model.eval()

    cap = cv2.VideoCapture(args.video)
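Unlike the other examples, this one restores a PyTorch state dict instead of calling load_weights() on Darknet .weights. A minimal sketch of the round trip such a checkpoint implies (the file name is hypothetical):

import torch

model = Darknet(args.cfgfile, height=args.reso)
torch.save(model.state_dict(), 'yolov3_converted.pth')      # produce the checkpoint once
model.load_state_dict(torch.load('yolov3_converted.pth'))   # restore it later, as above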