def test():
    vocab_size = VOCAB_SIZE + 2
    model = Seq2seqModel(sp.attn_model, vocab_size, sp.hidden_size, vocab_size,
                         sp.num_layer, sp.dropout)
    model.load_model()
    max_len = MAX_LEN + 1
    start = token2id.get('^', 37)  # '^' marks start-of-sequence; fall back to id 37 if missing
    id2token = {str(idx): token for token, idx in token2id.items()}  # invert vocab: id (as str) -> token
    for filename in os.listdir('./images'):
        x, y, lens, label = load_image('./images/' + filename)
        x = Variable(x.unsqueeze(0))

        beam_path = model.best_path(x, max_len, start, topk=3)
        greedy_path = model.best_path(x, max_len, start, topk=1)
        beam_path = ''.join([
            id2token.get(str(kk), '_') for kk in beam_path[0]
        ]).split('$')[0].lower()
        greedy_path = ''.join([
            id2token.get(str(kk), '_') for kk in greedy_path[0]
        ]).split('$')[0].lower()

        beam_true = 1 if beam_path == label.lower() else 0
        greedy_true = 1 if greedy_path == label.lower() else 0
        print("label\t{}\tbeam\t{}\t{}\tgreedy\t{}\t{}".format(
            label, beam_path, beam_true, greedy_path, greedy_true))
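The two decode-and-truncate blocks above repeat the same logic; as a sketch, it can be factored into a helper that follows the snippet's own conventions ('$' ends a sequence, '_' stands in for unknown ids):

def ids_to_string(ids, id2token, eos='$'):
    # Map ids to tokens, truncate at the end marker, lowercase for comparison
    text = ''.join(id2token.get(str(idx), '_') for idx in ids)
    return text.split(eos)[0].lower()

With it, beam_path = ids_to_string(beam_path[0], id2token), and likewise for greedy_path.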
Example #2
def main(mode, tiny, iou_threshold, confidence_threshold, path):
  class_names, n_classes = load_class_names()
  if tiny:
    model = YOLOv3_tiny(n_classes=n_classes,
                        iou_threshold=iou_threshold,
                        confidence_threshold=confidence_threshold)
  else:
    model = YOLOv3(n_classes=n_classes,
                   iou_threshold=iou_threshold,
                   confidence_threshold=confidence_threshold)
  inputs = tf.placeholder(tf.float32, [1, *model.input_size, 3])
  detections = model(inputs)
  saver = tf.train.Saver(tf.global_variables(scope=model.scope))

  with tf.Session() as sess:
    saver.restore(sess, './weights/model-tiny.ckpt' if tiny else './weights/model.ckpt')

    if mode == 'image':
      image = load_image(path, input_size=model.input_size)
      result = sess.run(detections, feed_dict={inputs: image})
      draw_boxes(path, boxes_dict=result[0], class_names=class_names, input_size=model.input_size)
      return

    elif mode == 'video':
      cv2.namedWindow("Detections")
      video = cv2.VideoCapture(path)
      fourcc = int(video.get(cv2.CAP_PROP_FOURCC))
      fps = video.get(cv2.CAP_PROP_FPS)
      frame_size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
      out = cv2.VideoWriter('./detections/video_output.mp4', fourcc, fps, frame_size)
      print("Video being saved at \"" + './detections/video_output.mp4' + "\"")
      print("Press 'q' to quit")
      while True:
        retval, frame = video.read()
        if not retval:
          break
        resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]),
                                   interpolation=cv2.INTER_NEAREST)
        result = sess.run(detections, feed_dict={inputs: [resized_frame]})
        draw_boxes_frame(frame, frame_size, result, class_names, model.input_size)
        cv2.imshow("Detections", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
          break
        out.write(frame)
      out.release()
      video.release()
      cv2.destroyAllWindows()
      return

    elif mode == 'webcam':
      while True:
        frame, addr = receive()
        frame_size = (frame.shape[1], frame.shape[0])
        resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]),
                                   interpolation=cv2.INTER_NEAREST)
        result = sess.run(detections, feed_dict={inputs: [resized_frame]})
        draw_boxes_frame(frame, frame_size, result, class_names, model.input_size)
        retval, jpg_buf = cv2.imencode(".jpg", frame)
        packet = jpg_buf.tobytes()
        udpServSock.sendto(packet, addr)
      return
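For reference, a hypothetical invocation of this entry point (argument values assumed, not from the source):

main(mode='image', tiny=False, iou_threshold=0.5,
     confidence_threshold=0.5, path='./data/dog.jpg')

Note that tf.placeholder and tf.Session are TensorFlow 1.x APIs; running this under TensorFlow 2.x would require the tf.compat.v1 shims plus tf.compat.v1.disable_eager_execution().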
Example #3
def install_netcity_menu():
    x_telegram_user_action = crud_search_action_window(
        odoo, [["name", "=", "Telegram Users"]])[0]

    image = load_image("assets/logo_fastapi.png")

    netcity_menu = Menu(
        name="NetCity",
        web_icon_data=image,
        action=f"ir.actions.act_window,{x_telegram_user_action}")

    crud_create_menu(odoo, netcity_menu)
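crud_create_menu is project-specific, but with plain odoorpc the same creation would reduce to roughly this sketch (ir.ui.menu and its fields are standard Odoo; the wrapper's exact behavior is an assumption):

# Hypothetical equivalent of crud_create_menu(odoo, netcity_menu)
odoo.env['ir.ui.menu'].create({
    'name': netcity_menu.name,
    'web_icon_data': netcity_menu.web_icon_data,  # base64-encoded icon
    'action': netcity_menu.action,  # 'ir.actions.act_window,<id>' reference
})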
Example #4
def test():
    ctc = CTCModel(output_size=VOCAB_SIZE,
                   num_layers=hp.num_layer,
                   num_units=hp.num_units,
                   dropout=hp.dropout)
    ctc.load_model()
    id2token = {str(idx): token for token, idx in token2id.items()}
    for filename in os.listdir('./images'):
        x, y, lens, label = load_image('./images/' + filename)
        x = Variable(x.unsqueeze(0))
        outputs = CTCtest(x, ctc, use_cuda=False)
        pre_label = ''.join([id2token.get(str(idx), '_') for idx in outputs])
        acc = 1 if pre_label.lower() == label.lower() else 0
        print("pre:\t{}\t\ttruth:\t{}\t\tcorrect\t{}".format(
            pre_label, label, acc))
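For context, standard CTC greedy decoding collapses consecutive repeats and then drops blanks. A minimal sketch (blank id 0 assumed, matching the decode_ctc_outputs call in the next example; not necessarily what CTCtest does internally):

def ctc_greedy_decode(ids, blank=0):
    # Collapse runs of identical ids, then drop blank symbols
    decoded, prev = [], None
    for idx in ids:
        if idx != prev and idx != blank:
            decoded.append(idx)
        prev = idx
    return decoded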
Example #5
def test():
    model = CTCModel(output_size=VOCAB_SIZE,
                     num_layers=hp.num_layer,
                     hidden_size=hp.hidden_size,
                     dropout=hp.dropout)
    model.load_model()
    id2token = {str(idx): token for token, idx in token2id.items()}
    for filename in os.listdir(TEST_DATA):
        x, y, lens, label = load_image(os.path.join(TEST_DATA, filename))
        x = Variable(x.unsqueeze(0))
        outputs = model(x)
        predict = model.decode_ctc_outputs(outputs, blank=0)
        pre_label = ''.join(
            [id2token.get(str(idx), '_') for idx in predict[0]])
        acc = 1 if pre_label.lower() == label.lower() else 0
        print("predict:\t{}\t\tlabel:\t{}\t\t{}".format(pre_label, label, acc))
def main(mode, tiny, iou_threshold, confidence_threshold, path):
    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 0

    # Per-car tracking state, keyed by car ID
    carTracker = {}
    carNumbers = {}
    carLocation1 = {}  # box at the previous measurement
    carLocation2 = {}  # box at the current measurement
    speed = [None] * 1000  # preallocated speed slot per car ID

    class_names, n_classes = load_class_names()
    if tiny:
        model = YOLOv3_tiny(n_classes=n_classes,
                            iou_threshold=iou_threshold,
                            confidence_threshold=confidence_threshold)
    else:
        model = YOLOv3(n_classes=n_classes,
                       iou_threshold=iou_threshold,
                       confidence_threshold=confidence_threshold)
    inputs = tf.placeholder(tf.float32, [1, *model.input_size, 3])
    detections = model(inputs)
    saver = tf.train.Saver(tf.global_variables(scope=model.scope))

    with tf.Session() as sess:
        saver.restore(
            sess, './weights/model-tiny.ckpt' if tiny else './weights/model.ckpt')

        if mode == 'image':
            image = load_image(path, input_size=model.input_size)
            result = sess.run(detections, feed_dict={inputs: image})
            draw_boxes(
                path, boxes_dict=result[0], class_names=class_names, input_size=model.input_size)
            return

        cv2.namedWindow("Detections")
        print("Video being saved at \"" + './detections/video_output.mp4' + "\"")
        print("Press 'q' to quit")
        pathIn = './data/PETS_2000_Frames/'
        files = sorted(os.listdir(pathIn),
                       key=lambda x: int(x.split('.')[0].split('_')[1]))
        frame = cv2.imread(pathIn + files[0])
        frame_size = frame.shape[:2][::-1]  # (width, height)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(
            './detections/video_output.mp4', fourcc, 20, frame_size)
        for i in range(len(files)):
            frame = cv2.imread(pathIn + files[i])
            if frame is None:
                break
            resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]),
                                       interpolation=cv2.INTER_NEAREST)
            result = sess.run(detections, feed_dict={inputs: [resized_frame]})
            draw_boxes_frame(frame, frame_size, result,
                             class_names, model.input_size)
            start_time = time.time()

            frame = cv2.resize(frame, frame_size)
            resultImage = frame

            frameCounter = frameCounter + 1

            carIDtoDelete = []

            # Update every tracker; mark low-quality tracks for removal
            for carID in carTracker.keys():
                trackingQuality = carTracker[carID].update(frame)
                if trackingQuality < 7:
                    carIDtoDelete.append(carID)

            for carID in carIDtoDelete:
                print('Removing carID ' + str(carID) + \
                      ' from list of trackers.')
                print('Removing carID ' + str(carID) + ' previous location.')
                print('Removing carID ' + str(carID) + ' current location.')
                carTracker.pop(carID, None)
                carLocation1.pop(carID, None)
                carLocation2.pop(carID, None)

            if not (frameCounter % 10):
                # Detect cars with a Haar cascade every 10th frame
                # (carCascade is an external cv2.CascadeClassifier, loaded elsewhere)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cars = carCascade.detectMultiScale(
                    gray, 1.1, 13, 18, (24, 24))

                for (_x, _y, _w, _h) in cars:
                    x = int(_x)
                    y = int(_y)
                    w = int(_w)
                    h = int(_h)

                    x_bar = x + 0.5 * w
                    y_bar = y + 0.5 * h

                    matchCarID = None

                    for carID in carTracker.keys():
                        trackedPosition = carTracker[carID].get_position()

                        t_x = int(trackedPosition.left())
                        t_y = int(trackedPosition.top())
                        t_w = int(trackedPosition.width())
                        t_h = int(trackedPosition.height())

                        t_x_bar = t_x + 0.5 * t_w
                        t_y_bar = t_y + 0.5 * t_h

                        # Match when each box's center lies inside the other box
                        if ((t_x <= x_bar <= (t_x + t_w)) and
                                (t_y <= y_bar <= (t_y + t_h)) and
                                (x <= t_x_bar <= (x + w)) and
                                (y <= t_y_bar <= (y + h))):
                            matchCarID = carID

                    if matchCarID is None:
                        print('Creating new tracker ' + str(currentCarID))

                        tracker = dlib.correlation_tracker()
                        tracker.start_track(
                            frame, dlib.rectangle(x, y, x + w, y + h))

                        carTracker[currentCarID] = tracker
                        carLocation1[currentCarID] = [x, y, w, h]

                        currentCarID = currentCarID + 1

            # Record every tracker's current position for speed estimation
            for carID in carTracker.keys():
                trackedPosition = carTracker[carID].get_position()
                t_x = int(trackedPosition.left())
                t_y = int(trackedPosition.top())
                t_w = int(trackedPosition.width())
                t_h = int(trackedPosition.height())
                carLocation2[carID] = [t_x, t_y, t_w, t_h]

            end_time = time.time()

            if end_time != start_time:
                fps = 1.0 / (end_time - start_time)
            for carID in carLocation1.keys():
                [x1, y1, w1, h1] = carLocation1[carID]
                [x2, y2, w2, h2] = carLocation2[carID]
                carLocation1[carID] = [x2, y2, w2, h2]

                if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                    # Estimate a car's speed once, from its first measured displacement
                    if speed[carID] is None or speed[carID] == 0:
                        speed[carID] = estimateSpeed(
                            [x1, y1, w1, h1], [x2, y2, w2, h2])

                    if speed[carID] is not None:
                        print(str(int(speed[carID])) + " km/hr")
                        cv2.putText(resultImage, str(int(speed[carID])) + " km/hr",
                                    (int(x1 + w1 / 2), int(y1 - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                    (255, 255, 255), 2)
            cv2.imshow("Detections", resultImage)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            out.write(resultImage)
        out.release()
        cv2.destroyAllWindows()
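estimateSpeed is not defined in this snippet. A typical implementation converts pixel displacement into km/h via a scene-specific calibration; the constants below are hypothetical:

import math

def estimateSpeed(location1, location2, ppm=8.8, fps=18):
    # Pixel distance between the two recorded box positions
    d_pixels = math.sqrt((location2[0] - location1[0]) ** 2 +
                         (location2[1] - location1[1]) ** 2)
    d_meters = d_pixels / ppm    # ppm: pixels per meter, calibrated per camera scene
    return d_meters * fps * 3.6  # meters per frame -> m/s -> km/h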
Example #7
def set_image(self, image_file):
    if image_file:
        self.image_file = image_file
    self._image, self.rect = load_image(self.image_file)
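This reads like a pygame-style sprite method, with load_image assumed to return an (image, rect) pair; a hypothetical call:

sprite.set_image('player.png')  # replaces the sprite's image and bounding rect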