Example #1
 def elapsed(self, last_step):
     """
     Return the full and incremental elapsed time.
     """
     total = (time.time() - self.start_time)
     last = (time.time() - last_step)
     return 'Total time: %s | Last step: %s' % (self.get_time(round(total,4)), self.get_time(round(last,8)))
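Example #1 is a method lifted from a larger timing class. For context, here is a minimal runnable sketch of the pieces it assumes; `start_time` and `get_time` are assumptions inferred from how `elapsed` uses them, not the original implementation.

import time

class StepTimer(object):
    # Hypothetical host class for the `elapsed` method above.
    def __init__(self):
        self.start_time = time.time()

    def get_time(self, seconds):
        # Assumed formatter: render a duration as a readable string.
        return '%s seconds' % seconds

    def elapsed(self, last_step):
        total = time.time() - self.start_time
        last = time.time() - last_step
        return 'Total time: %s | Last step: %s' % (
            self.get_time(round(total, 4)), self.get_time(round(last, 8)))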
Example #2
File: calc.py Project: Dineshs91/crawler
 def wrappedFunc(*args, **kwargs):
     start_time = time.time()
     val = func(*args, **kwargs)
     end_time = time.time()
     print '\n-------------'
     print 'Function-name:', func.func_name
     print 'Time: %fs' % (end_time - start_time)
     print '-------------'
     return val
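`wrappedFunc` is the inner function of a timing decorator; the enclosing decorator was cut off. A sketch of the full pattern, rewritten for Python 3 (the decorator name `timed` is an assumption):

import time
from functools import wraps

def timed(func):
    # Print the wall-clock time of each call, then pass the result through.
    @wraps(func)
    def wrapped(*args, **kwargs):
        start_time = time.time()
        val = func(*args, **kwargs)
        end_time = time.time()
        print('\n-------------')
        print('Function-name:', func.__name__)
        print('Time: %fs' % (end_time - start_time))
        print('-------------')
        return val
    return wrapped

@timed
def slow_add(a, b):
    time.sleep(0.1)
    return a + b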
Example #3
 def _wrapper(x, y, color, *args, **kwargs):
     time_before = time.time()
     func_call_result = func(x, y, color, *args, **kwargs)
     time_after = time.time()
     total_time_spend = time_after - time_before
     print "Total time spent: {0}".format(str(total_time_spend))
     print "Total array size: {0}\n".format(numpy.size(func_call_result.pixels))
     # record the elapsed time keyed by the result's array size
     timing_results.update({numpy.size(func_call_result.pixels): total_time_spend})
     return func_call_result
Example #4
 def run(func, im, format, t):
     global image
     image = im
     start = time.time()
     runs = t.repeat(iterations, 1)  # list of per-batch timings in seconds
     elapsed = (time.time() - start)
     min_ = min(runs) * 1000
     avg = (sum(runs) / len(runs)) * 1000
     name = func.__name__ + ' ' + format
     results[name] = [min_, avg, elapsed * 1000, name, len(func())]
     sortable[name] = [min_]
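run() leans on module globals (`iterations`, `results`, `sortable`, `image`) and a prepared `timeit.Timer`. A hedged sketch of how such a driver could be wired up; everything here except timeit's own API is an assumption:

import timeit

iterations = 3
results, sortable = {}, {}
image = None  # set by run()

def workload():
    # Stand-in for the image operation being benchmarked.
    return [x * x for x in range(10000)]

t = timeit.Timer(stmt=workload)    # Timer.repeat(repeat, number) returns
run(workload, object(), 'png', t)  # a list of per-batch timings in seconds
print(sorted(sortable.items(), key=lambda kv: kv[1]))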
Example #5
 def show(self,**kwargs):
     timeit = kwargs.get('timeit')
     if timeit:
         start = time.time()
     # todo - we should check for postgis support, and otherwise
     # use the MemoryDatasource
     lyr_adapter = PostgisLayer(self)
     lyr_adapter.show(**kwargs)
     if timeit:
         elapsed = time.time() - start
         print 'render time: %s seconds' % elapsed
Example #6
 def output_time(self, print_time):
     """
     Timing output wrapper to control the start point and verbosity of timing output.
     """
     if self.timing_started and print_time:
         val = color_text(4,self.elapsed(time.time()),self.no_color)
         sys.stderr.write('%s\n' % val)
Example #7
 def total_time(self, last_step=None):
     if self.verbose:
         total = time.time() - self.start_time
         out = "Total Nik2img run time: %s" % (self.get_time(round(total, 4)))
         if last_step:
             out += " | Last step: %s" % self.get_time(round(last_step, 8))
         val = color_text(4, out, self.no_color)
         sys.stderr.write("%s\n" % val)
Example #8
def main():
    frames = video_by_frame("./data/video.mp4")
    tracking_frames = track_video(frames)
    start_time = time.time()

    for im in tracking_frames:
        stop_time = time.time()
        fps = 1 / (stop_time - start_time)  # instantaneous frames per second
        draw_str(im, (20, 20), 'FPS: %.2f' % fps)
        start_time = stop_time

        cv2.imshow('LKtrack', im)
        key = cv2.waitKey(40)
        if key == 27 or key == ord('q') or key == ord('Q'):
            break

    cv2.destroyAllWindows()
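Several examples on this page smooth the frame rate with `fps = (fps + 1/dt) / 2`, a two-tap running average. An exponential moving average gives the same effect with a tunable smoothing factor; a small sketch (the `alpha` value is an arbitrary choice):

import time

class FpsMeter(object):
    # Exponentially smoothed frames-per-second estimate.
    def __init__(self, alpha=0.1):
        self.alpha = alpha  # smoothing factor in (0, 1]
        self.fps = 0.0
        self._last = None

    def tick(self):
        now = time.time()
        if self._last is not None:
            inst = 1.0 / max(now - self._last, 1e-6)  # instantaneous FPS
            self.fps = self.alpha * inst + (1 - self.alpha) * self.fps
        self._last = now
        return self.fps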
Example #9
File: gen.py Project: tadeoos/nltkgenlit
def generate_cache(path, files):
    cache_dict = {}
    time_zero = time.time()
    for file in files:
        start_time = time.time()
        cprint('\n======= processing file {}...'.format(file), color='blue')
        with open(os.path.join(path, file), encoding='utf-8') as f:
            raw = f.read()

        tokens = nltk.word_tokenize(raw)
        book = nltk.Text(tokens)
        hapaxes = len(nltk.probability.FreqDist(
            book).hapaxes()) / len(set(book))
        vocab_count = len(set(book))
        print(
            '=== {:.2f}s -- Text class initialized'.format(time.time() - start_time))

        bigrams = nltk.bigrams(book)
        bi_cfd = nltk.ConditionalFreqDist(bigrams)
        print('=== {:.2f}s -- bigrams done'.format(time.time() - start_time))

        trigrams = nltk.trigrams(book)
        tri_cfd = nltk.ConditionalFreqDist(((x, y), z) for x, y, z in trigrams)
        print('=== {:.2f}s -- trigrams done'.format(time.time() - start_time))

        file_dict = {'bigrams': bi_cfd, 'trigrams': tri_cfd}
        cache_dict['{}'.format(file)] = file_dict
        cache_dict['{}'.format(file)].update(
            {'hap': hapaxes, 'v_count': vocab_count})
        cprint('====== {:.2f}s -- cached {}'.format(time.time() -
                                                    start_time, file), color='green')

    cprint('\n\n=========== CACHE TOOK {:.2f} seconds\n =========='.format(
        time.time() - time_zero), color='yellow')
    return cache_dict
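generate_cache returns a plain dict of ConditionalFreqDists, so it can be persisted between runs. A sketch using pickle; the cache file name is an assumption, the project may store it differently:

import os
import pickle

def load_or_build_cache(path, files, cache_file='gen_cache.pickle'):
    # Reuse a previously pickled cache when present; rebuild otherwise.
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fh:
            return pickle.load(fh)
    cache = generate_cache(path, files)
    with open(cache_file, 'wb') as fh:
        pickle.dump(cache, fh)
    return cache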
Example #10
def deteksi_ds(yolo):

    # Define the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    tulis_video_output = True

    # The Deep SORT model is loaded here.
    namafile_model = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(namafile_model, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    video_capture = cv2.VideoCapture("demo-1.mp4")
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 160)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 120)

    #
    # To write out the recorded video, create a VideoWriter object with its codec,
    # and also write the detection results to a txt file.
    #
    if tulis_video_output:

        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('hasil_output2.avi', fourcc, 15, (w, h))
        list_file = open('hasil_deteksi.txt', 'w')
        frame_index = -1

    # --------------------
    # START VIDEO CAPTURE
    # --------------------
    # initial FPS
    fps = 0.0
    jum_track = set()

    while True:
        ret, frame = video_capture.read()

        # Grayscale
        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # read the frame
        if ret != True:
            break

        # start the timer
        t1 = time.time()

        # convert BGR to RGB
        image = Image.fromarray(frame[..., ::-1])
        kotak = yolo.detect_image(image)

        # print("[-] Boxes:", len(kotak))
        features = encoder(frame, kotak)

        # (detection confidence is hard-coded to 1.0 here).
        deteksi_box = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(kotak, features)
        ]

        # run non-maxima suppression
        boxes = np.array([d.tlwh for d in deteksi_box])
        scores = np.array([d.confidence for d in deteksi_box])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        deteksi_box = [deteksi_box[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(deteksi_box)

        # Track the detection results
        for track in tracker.tracks:
            # was it tracked successfully?
            if not track.is_confirmed() or track.time_since_update > 1:
                continue

            # build its bounding box
            bbox = track.to_tlbr()

            # box
            # This is the prediction box
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)

            # text for the box id
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                        (0, 255, 0), 1)

            jum_track.add(track.track_id)

            cv2.putText(frame, "> People count: " + str(max(jum_track)),
                        (10, 25), cv2.FONT_HERSHEY_DUPLEX, .5, (0, 0, 255), 1)

            print(">> Hits (people):", max(jum_track))

        #
        # keep the detection boxes visible
        # this is the Detection Box
        for det in deteksi_box:
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)

        # display - DEBUG
        cv2.imshow('Test Video', frame)

        #
        # If writing the video output?
        #
        if tulis_video_output:

            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(kotak) != 0:
                for i in range(0, len(kotak)):
                    list_file.write(
                        str(kotak[i][0]) + ' ' + str(kotak[i][1]) + ' ' +
                        str(kotak[i][2]) + ' ' + str(kotak[i][3]) + ' ')
            list_file.write('\n')

        # display fps
        fps = (fps + (1. / (time.time() - t1))) / 2
        print(">> fps= %f" % (fps))

        # press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # clean up video_capture
    video_capture.release()
    if tulis_video_output:
        # release video
        out.release()
        # close the file
        list_file.close()
    # release everything
    cv2.destroyAllWindows()
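Every Deep SORT example on this page funnels detections through preprocessing.non_max_suppression(boxes, nms_max_overlap, scores): greedily keep the highest-scoring box and drop any box whose overlap with it exceeds the threshold. A self-contained sketch of that idea for (top-left x, top-left y, width, height) boxes; it mirrors the library's behavior but is not its exact code:

import numpy as np

def greedy_nms(boxes, max_overlap, scores):
    # Return indices of the boxes to keep; `boxes` is (N, 4) in tlwh format.
    if len(boxes) == 0:
        return []
    boxes = np.asarray(boxes, dtype=float)
    x1, y1 = boxes[:, 0], boxes[:, 1]
    x2, y2 = boxes[:, 0] + boxes[:, 2], boxes[:, 1] + boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(scores)
    keep = []
    while order.size > 0:
        i = order[-1]  # highest remaining score
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[:-1]])
        yy1 = np.maximum(y1[i], y1[order[:-1]])
        xx2 = np.minimum(x2[i], x2[order[:-1]])
        yy2 = np.minimum(y2[i], y2[order[:-1]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        overlap = (w * h) / area[order[:-1]]
        order = order[:-1][overlap <= max_overlap]
    return keep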
Example #11

from fractions import gcd  # assumed import (Python 2 code, matching the print below)

def f(x):
    return x * x + 1  # assumed iteration polynomial; the original definition was truncated

def Pollard(n, iter1=1000, iter2=1000):
    # Pollard's rho with batched gcds. The def line and the loop bounds
    # are reconstructed; only the body below is original.
    z = 1
    x = f(0)
    y = f(x)
    
    for _ in xrange(iter1):
        z = 1
        for _ in xrange(iter2):
            x = f(x) % n
            y = f(f(y)) % n
            z = (z * (x - y)) % n
        d =  gcd(z, n)
        if d > 1:
            return d, n//d
    return d, n//d
# 187, 4717, 40100490452444053, 278009, 63053699, 549314599, 7247123747459, 2097335995683611, 4274010960572200553847767
start_time = time.time()
print Pollard(40100490452444053)
elapsed_time = time.time() - start_time
print("%0.10f" % elapsed_time) 


def isqrt(n):
    x = n
    y = (x + 1) // 2
    while y < x:
        x = y
        y = (x + n // x) // 2
    return x

def Fermat(n):
    """Fermat factorization: find a, b with n = a*a - b*b = (a-b)(a+b).
    (The body was truncated in the source; this is the standard method,
    built on the isqrt helper above.)"""
    a = isqrt(n)
    if a * a < n:
        a += 1
    b2 = a * a - n
    while isqrt(b2) ** 2 != b2:
        a += 1
        b2 = a * a - n
    b = isqrt(b2)
    return a - b, a + b
Example #12
    return frame


net = build_ssd('test', 300, 21)  # initialize SSD
net.load_state_dict(torch.load('data/weights/ssd_300_VOC0712.pth'))
transform = BaseTransform(net.size, (104 / 256.0, 117 / 256.0, 123 / 256.0))

video_capture = cv2.VideoCapture(webcam_index)

fps = 0.0
while True:
    ret, frame = video_capture.read()  # frame shape 640*480*3
    if not ret or frame is None:
        break

    t1 = time.time()

    # rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # bboxes = face_detector.predict(rgb_frame, thresh)
    ann_frame = predict(frame)

    bg = np.zeros((map_height, map_width, 3))

    # for box in bboxes:
    #     actual_face_size = math.sqrt(box[2] * box[3]) # use area of the face size as a measure of distance
    #     distance = max_face_size - actual_face_size # closest faces should be lowest values

    #     x = box[0] / max_x # scale to 0-1
    #     y = distance / max_y # scale to 0-1
    #     # print(str(x) + ", " + str(y))
Example #13
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    width = 1280
    height = 720
    rfps = 10

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    tracking = args.tracking
    writeVideo_flag = args.writeVideo_flag
    asyncVideo_flag = args.asyncVideo_flag
    webcamera_flag = args.webcamera_flag
    ipcamera_flag = args.ipcamera_flag
    udp_flag = args.udp_flag

    full_cam_addr, key = sd.set_address(args.ipaddress, args.cam_ip,
                                        args.cam_cmd, args.key)
    cam_ip = full_cam_addr.replace(args.cam_cmd,
                                   "").replace("rtsp://*****:*****@", "")
    if args.jpegmode:
        full_cam_addr = full_cam_addr.replace("rtsp", "http")

    print(full_cam_addr)
    print(key)

    if asyncVideo_flag:
        print("load videofile")
        video_capture = VideoCaptureAsync(args.videofile)
    elif ipcamera_flag or args.jpegmode:
        print("load ipcamera")
        video_capture = cv2.VideoCapture(full_cam_addr)
        # width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
        # height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # rfps = video_capture.get(cv2.CAP_PROP_FPS)
        print("fps:{} width:{} height:{}".format(rfps, width, height))
    elif webcamera_flag:
        print("load webcamera")
        video_capture = cv2.VideoCapture(0)
    else:
        print("load videofile")
        video_capture = cv2.VideoCapture(args.videofile)

    # video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
        frame_index = -1

    if udp_flag:
        HOST = ''
        PORT = 5000
        address = '192.168.2.255'
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        sock.bind((HOST, PORT))

    fps = 0.0

    i = 0

    if not args.maskoff:
        maskbgi = Image.new('RGB', (int(width), int(height)), (0, 0, 0))
        mask = Image.open(args.maskdir + 'mask' + args.ipaddress[-1] +
                          '.png').convert("L").resize(size=(int(width),
                                                            int(height)),
                                                      resample=Image.NEAREST)

    while True:
        nowtime = datetime.datetime.now(
            timezone('Asia/Tokyo')).strftime('%Y-%m-%d %H:%M:%S.%f%z')
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            print("can't read")
            video_capture = cv2.VideoCapture(full_cam_addr)
            continue

        t1 = time.time()

        try:
            image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        except TypeError:
            video_capture = cv2.VideoCapture(full_cam_addr)
            continue
        if not args.maskoff:
            image = Image.composite(maskbgi, image, mask)
        boxes, confidence, classes = yolo.detect_image(image)

        if tracking:
            features = encoder(frame, boxes)

            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]
        else:
            detections = [
                Detection_YOLO(bbox, confidence, cls)
                for bbox, confidence, cls in zip(boxes, confidence, classes)
            ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]
        car_data = {}
        if tracking:
            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                car_data[str(track.track_id)] = [
                    int(bbox[0]),
                    int(bbox[1]),
                    int(bbox[2]),
                    int(bbox[3])
                ]
            sd.send_amqp(
                sd.create_jsondata(cam_ip, nowtime,
                                   time.time() - t1, car_data, args.jsonfile,
                                   args.json_path, i), key, args.AMQPHost)
            i += 1

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("FPS = %f" % (fps))

        ### frame-skipping logic added ###
        if not args.jsonfile and args.skip:
            if fps <= 10:
                for _i in range(int(math.ceil(rfps / fps)) - 1):
                    ret, frame = video_capture.read()

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
Example #14
File: demo.py Project: SpecDI/cs407
def main(yolo, sequence_file, fps_render_rate, writeVideo_flag, labels_file,
         hide_window):
    # Compute output file
    file_name = os.path.splitext(
        os.path.basename(sequence_file))[0] if sequence_file != '0' else '0'
    if sequence_file == '0':
        sequence_file = 0

    # Compute the action map if labels provided
    action_map = dict()
    if labels_file is not None:
        action_map = parse_labels_file(labels_file)
    print(action_map)

    # Build directory path
    frames_dir_path = "output/action_tubes/" + file_name
    if not writeVideo_flag:
        if os.path.exists(frames_dir_path):
            shutil.rmtree(frames_dir_path)
        os.mkdir(frames_dir_path)

    # Create coords dir for movie
    coords_path = 'output/tracked_bounding_boxes/' + file_name + '.json'

    output_seq = 'output/annotated_videos/' + file_name + '.avi'

    # Dict of coordinates for each tracked individual
    track_map = dict()

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = 'object_detection/model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    video_capture = cv2.VideoCapture(sequence_file)

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')  #*'MJPG'
        # Build video output handler only if we are not cropping
        out = cv2.VideoWriter(output_seq, fourcc, fps_render_rate, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0
    frame_number = 0
    while video_capture.isOpened():
        frame_number += 1
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break
        t1 = time.time()

        # image = Image.fromarray(frame)
        image = Image.fromarray(frame[..., ::-1])  #bgr to rgb
        boxs = yolo.detect_image(image)
        # print("box_num",len(boxs))
        features = encoder(frame, boxs)

        # (detection confidence is hard-coded to 1.0 here).
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            crop_img = frame[int(bbox[1]):int(bbox[3]),
                             int(bbox[0]):int(bbox[2])].copy()

            # Append coordinates for individual to track map
            if track.track_id not in track_map:
                track_map[track.track_id] = [
                    (frame_number,
                     [int(bbox[0]),
                      int(bbox[1]),
                      int(bbox[2]),
                      int(bbox[3])])
                ]
            else:
                track_map[track.track_id].append(
                    (frame_number,
                     [int(bbox[0]),
                      int(bbox[1]),
                      int(bbox[2]),
                      int(bbox[3])]))

            # Build directory path
            frames_dir_path = "output/action_tubes/" + file_name + '/' + str(
                track.track_id)
            if not os.path.exists(frames_dir_path) and not writeVideo_flag:
                os.mkdir(frames_dir_path)
            # Write frame or annotate frame
            if not writeVideo_flag:
                cv2.imwrite(frames_dir_path + "/" + str(frame_number) + ".jpg",
                            crop_img)
            else:
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)

                append_str = str(track.track_id) + ": Person"
                if track.track_id in action_map:
                    append_str += ' ' + action_map[track.track_id]
                cv2.putText(frame, append_str, (int(bbox[0]), int(bbox[1])), 0,
                            5e-3 * 200, (0, 255, 0), 2)

        with open(coords_path, 'w') as fp:
            json.dump(track_map, fp)

        for det in detections:
            bbox = det.to_tlbr()
            if writeVideo_flag:
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)

        if not hide_window:
            cv2.imshow('', cv2.resize(frame, (1200, 675)))

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
Example #15
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    tracking = True
    writeVideo_flag = True
    asyncVideo_flag = False

    file_path = 'video.webm'
    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        if tracking:
            features = encoder(frame, boxes)

            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]
        else:
            detections = [
                Detection_YOLO(bbox, confidence, cls)
                for bbox, confidence, cls in zip(boxes, confidence, classes)
            ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        if tracking:
            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)

        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2) + "%"
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            if len(classes) > 0:
                cls = det.cls
                cv2.putText(frame,
                            str(cls) + " " + score,
                            (int(bbox[0]), int(bbox[3])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)

        cv2.imshow('', frame)

        if writeVideo_flag:  # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("FPS = %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
Example #16
    def detect_image(self, image):
        start = time.time()

        if self.is_fixed_size:
            assert self.model_image_size[
                0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[
                1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(
                image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],
                               outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        end = time.time()
        print(end - start)
        return image
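detect_image pads its input with letterbox_image so the network sees a canvas whose sides are multiples of 32 without distorting the aspect ratio. A sketch of what that helper typically does in keras-yolo3-style projects; treat it as an approximation of the one imported here:

from PIL import Image

def letterbox_image(image, size):
    # Resize `image` to fit inside `size` keeping aspect ratio, pad with gray.
    iw, ih = image.size
    w, h = size
    scale = min(w / float(iw), h / float(ih))
    nw, nh = int(iw * scale), int(ih * scale)
    resized = image.resize((nw, nh), Image.BICUBIC)
    boxed = Image.new('RGB', size, (128, 128, 128))
    boxed.paste(resized, ((w - nw) // 2, (h - nh) // 2))
    return boxed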
Example #17
def opt_aux(atom_list, jmats, spins, tol=1.0e-10):
    """This function separates the optimizer's functionality from the file
    handling. It assumes that jmats is in the correct order,
    i.e. jnums looks like [0, 1, 2, 3, ...]."""
    N_atoms = len(atom_list)
    # Generate the Jij and anisotropy arrays
    Jij = gen_Jij(atom_list,jmats)
    anis = gen_anisotropy(atom_list)
    # Get the spin magnitudes from the atoms in atom list
    spin_mags = []
    for atom in atom_list:
        spin_mags.append(atom.spinMag)
    spin_mags = np.array(spin_mags)
    
    # hamiltonian method
    def hamiltonian(p, Jij=None, spinMags=None, anis=None):
        """Compute the hamiltonian given a list of thetas and phis."""
        # Thetas are the first half, phis the second half
        theta = p[:len(p)//2]
        phi = p[len(p)//2:]

        # Sx,Sy,Sz
        Sx = spinMags*sin(theta)*cos(phi)
        Sy = spinMags*sin(theta)*sin(phi)
        Sz = spinMags*cos(theta)
#        print 'local opt spins'
#        print Sx[0], Sy[0], Sz[0]

        # Array of spin vectors for each atom. Reshape it. Calculate hamiltonian with it and return the hamiltonian. 
        Sij = np.array([Sx,Sy,Sz])
        Sij = Sij.T.reshape(1,3*len(p)//2)[0].T
        
        SijT = Sij.T
        #res1 = SijT * Sij
        res1 = SijT*Jij
        Hij = np.dot(res1,Sij).flat[0]
        Ham = - Hij - np.dot(anis, Sij**2)

        return Ham 
    
    # derivative of the hamiltonian
    def deriv(p, Jij = None, spinMags = None, anis = None):
        """ Computes the derivative of the hamiltonian with respect to each theta and then each phi"""
        # Thetas are the first half, phis the second half
        half = len(p)/2
        theta = p[:half]
        phi = p[half:]        

        # Sx,Sy,Sz
        Sx = spinMags*sin(theta)*cos(phi)
        Sy = spinMags*sin(theta)*sin(phi)
        Sz = spinMags*cos(theta)

        # dSx/dtheta,dSy/dtheta,dSz/dtheta
        Sxt = spinMags*cos(theta)*cos(phi)
        Syt = spinMags*cos(theta)*sin(phi)
        Szt = -spinMags*sin(theta)
        # dSx/dphi,dSy/dphi,dSz/dphi
        Sxp = -spinMags*sin(theta)*sin(phi)
        Syp = spinMags*sin(theta)*cos(phi)
        Szp = 0*cos(theta)
        
        # Makes an array of the derivatives with respect to theta, then another with respect to phi
        # (dSx1/dtheta1,dSy1/dtheta1,dSz1/dtheta1
        # (                                      dSx2/dtheta2,dSy2/dtheta2,dSz2/dtheta2
        # (                                                                             ...
        # Similarly for phis
        # Then we multiply this by Jij which yields a half by 3*half array. We then multiply this by
        # the Sij vector which finally yields a half by 1 array. Thus
        # dSijt * Jij * Sij -> [dE/dtheta1, dE/dtheta2, dE/dtheta3, ...]
        # and similarly for phi
        dSijt = np.zeros((half,3*half))
        dSijp = np.zeros((half,3*half))
        dSijt[range(half),range(0,3*half,3)]=Sxt
        dSijt[range(half),range(1,3*half,3)]=Syt
        dSijt[range(half),range(2,3*half,3)]=Szt
        dSijp[range(half),range(0,3*half,3)]=Sxp
        dSijp[range(half),range(1,3*half,3)]=Syp
        dSijp[range(half),range(2,3*half,3)]=Szp
        
        # Standard Sij spin vector we've been using
        Sij = np.array([Sx,Sy,Sz])
        Sij = Sij.T.reshape(1,3*len(p)//2)[0].T

        # Calculate a hamiltonian for both theta and phi
        res1t = np.dot(dSijt * Jij, Sij)
        res2t = np.dot(Sij.T, Jij * dSijt.T)
        Hijt = res1t + res2t
        Hamt = - Hijt - np.matrix(np.dot(2*anis.T*Sij,dSijt.T))
        Hamt = Hamt.T

        res1p = np.dot(dSijp * Jij, Sij)
        res2p = np.dot(Sij.T, Jij * dSijp.T)
        Hijp = res1p + res2p
        Hamp = - Hijp - np.matrix(np.dot(2*anis.T*Sij,dSijp.T))
        Hamp = Hamp.T
        
        # Concatenate the two and return the result
        result = np.concatenate((np.array(Hamt),np.array(Hamp)))
        return result.T

    # populate initial p list
    # returns a numpy array of all the thetas followed by all the phis
    thetas = []
    phis = []
    if len(spins) != N_atoms:
        raise Exception('number of spins does not match number of atoms')
    for i in range(N_atoms):
        sx = spins[i][0]
        sy = spins[i][1]
        sz = spins[i][2]
        s  = atom_list[i].spinMag

        theta = arccos(sz/s)
        phi   = np.arctan2(sy,sx)
        
        thetas.append(theta)
        phis.append(phi)
        
#        print 'initial spins'
#        print s*sin(theta)*cos(phi), s*sin(theta)*sin(phi), s*cos(theta)
    p0 = np.array(thetas+phis)

    # define the limits parameter list
    limits = []
    for i in range(len(p0)):
        if i < len(p0)//2:#theta
            limits.append((0,pi))
        else:#phi
            limits.append((-pi,pi))
    
    print "Local Optimization Beginning..."
    
    print "tolerance", tol
    st = time.time()
    # call minimizing function
    m = fmin_l_bfgs_b(hamiltonian, p0, fprime = deriv, args = (Jij, spin_mags, anis), pgtol=tol, bounds = limits)
    print time.time()-st, "seconds"
    print "Optimization Complete"
    # grab returned parameters
    # thetas are the first half of the list, phis are the second half
    pout=m[0]
    theta=pout[0:len(pout)/2]
    phi=pout[len(pout)/2::]
    # recreate sx,sy,sz's
    sx=spin_mags*sin(theta)*cos(phi)
    sy=spin_mags*sin(theta)*sin(phi)
    sz=spin_mags*cos(theta)

    return np.array([sx,sy,sz]).T   
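The initial parameter vector p0 comes from inverting the spherical parameterization Sx = S*sin(theta)*cos(phi), Sy = S*sin(theta)*sin(phi), Sz = S*cos(theta). A quick numeric round-trip check of that inversion:

import numpy as np

s, theta, phi = 1.5, 0.7, -2.1  # arbitrary magnitude and angles
sx = s * np.sin(theta) * np.cos(phi)
sy = s * np.sin(theta) * np.sin(phi)
sz = s * np.cos(theta)
# invert, as opt_aux does when building p0
assert np.isclose(np.arccos(sz / s), theta)
assert np.isclose(np.arctan2(sy, sx), phi)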
Example #18
# Test if hysplit binary exists
hysplit_bin = 'C:\\hysplit4\\exec\\hyts_std.exe'

if not os.path.isfile(hysplit_bin):
    print("Couldn't find hyts_std.exe. Please check the HYSPLIT installation "
          "or set the script variable 'hysplit_bin' properly.")
    sys.exit()

print "Enter:"
meteo_dir = raw_input("Meteo directory (e.g. C:\\meteo): ")
output_dir = raw_input("Output directory (e.g. C:\\out): ")
csv_source = raw_input("Location of the csv file containing run specifications (e.g. C:\\runs.csv): ")

# Execution start time stamp
startTime = time.time()
# Load runs from csv file
csv_input = csv.reader(open(csv_source, 'r'))

# ASCDATA.CFG
ASCDATA = """-90.0   -180.0  lat/lon of lower left corner
1.0     1.0     lat/lon spacing in degrees
180     360     lat/lon number of data points
2               default land use category
0.2             default roughness length (m)
'C:/hysplit4/bdyfiles/'  directory of files
"""

# SETUP.CFG
SETUP = """&SETUP\ntratio = 0.75,\nmgmin = 15,\nkhmax = 9999,\nkmixd = 0,
kmsl = 0,\nnstr = 0,\nmhrs = 9999,\nnver = 0,\ntout = 60,\ntm_tpot = 0,
Example #19
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    
    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename,batch_size=1)
    
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True 
    
    video_capture = cv2.VideoCapture(0)

    if writeVideo_flag:
    # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1 
        
    fps = 0.0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break
        t1 = time.time()

        image = Image.fromarray(frame)
        boxs = yolo.detect_image(image)
        # print("box_num", len(boxs))
        features = encoder(frame,boxs)
        
        # (detection confidence is hard-coded to 1.0 here).
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
        
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]
        
        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
            cv2.putText(frame, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)

        for det in detections:
            bbox = det.to_tlbr()
            cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,0,0), 2)
            
        cv2.imshow('', frame)
        
        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index)+' ')
            if len(boxs) != 0:
                for i in range(0,len(boxs)):
                    list_file.write(str(boxs[i][0]) + ' '+str(boxs[i][1]) + ' '+str(boxs[i][2]) + ' '+str(boxs[i][3]) + ' ')
            list_file.write('\n')
            
        fps  = ( fps + (1./(time.time()-t1)) ) / 2
        print("fps= %f"%(fps))
        
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
Example #20
def test():
    global TOTAL_TIME
    start = time.time()
    im = mapnik.Image(m.width,m.height)
    mapnik.render(m,im)
    TOTAL_TIME += (time.time() - start)
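test() only accumulates into the global TOTAL_TIME, so it is presumably driven repeatedly by timeit or a plain loop. A hedged sketch of such a driver for the example above; the iteration count and the reporting line are assumptions:

import timeit

TOTAL_TIME = 0.0
iterations = 10

timer = timeit.Timer(stmt=test)            # timeit accepts a callable
timer.repeat(repeat=iterations, number=1)  # each call runs test() once
print('avg render: %.4f s' % (TOTAL_TIME / iterations))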
Example #21
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    video_capture = cv2.VideoCapture(0)
    fps = 0.0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break
        t1 = time.time()

        image = Image.fromarray(frame)
        boxs = yolo.detect_image(image)

        features = encoder(frame, boxs)

        # (detection confidence is hard-coded to 1.0 here).
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        for track, det in zip(tracker.tracks, detections):
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 4)
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 4)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 480,
                        (124, 252, 0), 4)
        cv2.imshow('', frame)

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
Example #22
 def prepare(self):
     super(ComposeDebug, self).prepare()
     self.timing_started = True
     self.start_time = time.time()
     self.debug_msg("Nik2img starting...")
     self.debug_msg("Format: %s" % self.format)
Example #23
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = False

    #video_capture = cv2.VideoCapture("rtsp://*****:*****@192.168.30.81:554/3")

    #video_capture = cv2.VideoCapture("test.mp4")

    video_capture = cv2.VideoCapture(0)

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0

    htr = HumanTraceRecorder()
    frameCounter = 0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3

        #frame = cv2.resize(frame,(640,360), interpolation=cv2.INTER_LINEAR)

        if ret != True:
            break
        t1 = time.time()

        image = Image.fromarray(frame)
        boxs = yolo.detect_image(image)
        # print("box_num",len(boxs))
        features = encoder(frame, boxs)

        # (detection confidence is hard-coded to 1.0 here).
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            print(track.name_id)

            if (frameCounter % 5) == 0:

                x = int((bbox[0] + bbox[2]) / 2)
                y = int((bbox[1] + bbox[3]) / 2)
                new_time = frameCounter

                if track.name_id != 0:  # this is a tracked person
                    htr.updatePerson(track.name_id, x, y, new_time)
                else:
                    subimage = image.crop((int(bbox[0]), int(bbox[1]),
                                           int(bbox[2]), int(bbox[3])))
                    subimagearr = np.asarray(subimage)
                    faceID = getFaceID(
                        subimagearr,
                        predictor_path=
                        "./deep_sort/traceRecording/shape_predictor_5_face_landmarks.dat",
                        face_rec_model_path=
                        "./deep_sort/traceRecording/dlib_face_recognition_resnet_model_v1.dat"
                    )
                    if faceID == 0:
                        print("No face found")
                    else:
                        find, name_id = htr.checkPerson(faceID)
                        if find:
                            track.name_id = htr.updatePerson(
                                name_id, x, y, new_time)
                        else:
                            track.name_id = htr.addNewPerson(
                                faceID, x, y, new_time)

            cv2.putText(frame, str(track.name_id),
                        (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                        (0, 255, 0), 2)
        frameCounter += 1
        """
            if track.track_id != 0: # this is a tracked person
                dataBase.addPersonTrace(track.track_id, location) # add the new location to the person's trace
            else :  # new detected person, need to verify the identity
            
            
                subimage = image(cv2.Rect(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])))
                result = faceDetect(subimage) # result should be the face data detected


                if result: # make sure there is a face detection
                    if dataBase.checkPerson(result): # check if the person was recorded before
                        name = dataBase.checkName(result)
                    else: # new person, add to dataBase
                        newName = XXXXXXXXX # XXXXXXXXX is the new name
                        dataBase.addPerson(result, newName) 
                        name = newName
                    dataBase.addPersonTrace(name, location) # add the new location to the person's trace

                    track.track_id = name
                else : # no face detected, cannot identify the person
                    # do nothing, leave the track unchanged and wait for the next chance
        """

        for det in detections:
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)

        cv2.imshow('', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()

    htr.saveToFile()
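Example #23 depends on a project-specific HumanTraceRecorder whose internals are not shown. Below is a hypothetical minimal stand-in matching the calls used above (checkPerson, updatePerson, addNewPerson, saveToFile); it illustrates the expected interface, not the real class:

import json

class HumanTraceRecorder(object):
    # Hypothetical: maps face IDs to name IDs and records (x, y, t) traces.
    def __init__(self):
        self._faces = {}   # faceID -> name_id
        self._traces = {}  # name_id -> list of (x, y, t)
        self._next_id = 1

    def checkPerson(self, face_id):
        # Return (found, name_id); name_id is 0 when the face is unknown.
        name_id = self._faces.get(face_id, 0)
        return name_id != 0, name_id

    def addNewPerson(self, face_id, x, y, t):
        name_id = self._next_id
        self._next_id += 1
        self._faces[face_id] = name_id
        self._traces[name_id] = [(x, y, t)]
        return name_id

    def updatePerson(self, name_id, x, y, t):
        self._traces.setdefault(name_id, []).append((x, y, t))
        return name_id

    def saveToFile(self, path='traces.json'):
        with open(path, 'w') as fh:
            json.dump(self._traces, fh)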
Example #24
def main(yolo):
    points = []
    tpro = 0.0
    # Definition of the parameters
    max_cosine_distance = 0.9
    nn_budget = None
    nms_max_overlap = 1.0
    
    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename,batch_size=1)
    
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True 
    
    video_capture = cv2.VideoCapture(0)

    if writeVideo_flag:
    # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1 
        
    fps = 0.0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break

        frame=cv2.flip(frame,1)
        image = Image.fromarray(frame)
        

        # ___________________________________________________________________________DETECT WITH YOLO 
        t1 = time.time()

        boxs = yolo.detect_image(image)

        # print("box_num",len(boxs))
        features = encoder(frame,boxs)
        
        # (detection confidence is hard-coded to 1.0 here).
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
        
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        
        detections = [detections[i] for i in indices]

        # ___________________________________________________________________________DRAW DETECT BOX
        to_move = []
        for det in detections:
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 0, 255), 1)

            temp = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
            to_move.append(temp)

        # now feed the tracked boxes to the mover

        # ___________________________________________________________________________MOVE

        if to_move:

            # Initial co-ordinates of the object to be tracked 
            # Create the tracker object
            mover = [dlib.correlation_tracker() for _ in range(len(to_move))]
            # Provide each tracker with the initial position of its object
            for i, rect in enumerate(to_move):
                mover[i].start_track(frame, dlib.rectangle(*rect))  ## FEED FIRST BOX HERE

            for _ in range(0, 100):  ##### START LOOP MOVER
                ret, frame = video_capture.read()  # temporary
                full_frame_mover = []
                frame = cv2.flip(frame, 1)  # temporary

                # Update the mover
                for i in range(len(mover)):

                    # _____________FEED NEW IMAGE
                    mover[i].update(frame)

                    # _________________DRAW
                    rect = mover[i].get_position()
                    pt1 = (int(rect.left()), int(rect.top()))
                    pt2 = (int(rect.right()), int(rect.bottom()))

                    cv2.rectangle(frame, pt1, pt2, (255, 255, 255), 3)

                    full_frame_mover.append((pt1, pt2))
                    # print(full_frame_mover)  # finished one frame

                    # ___________________________________________________________________________Call the tracker
                    tracker.predict()
                    tracker.update(detections)

                    # ___________________________________________________________________________DRAW TRACK RECTANGLE
                    for track in tracker.tracks:
                        if not track.is_confirmed() or track.time_since_update > 1:
                            continue

                        bbox = track.to_tlbr()
                        cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                        cv2.putText(frame, str(track.track_id), (int(bbox[0]), int(bbox[1]) + 30), 0, 5e-3 * 200, (0, 255, 0), 3)

                        dot = int(int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) / 2)), int(bbox[3] - 10)
                        cv2.circle(frame, dot, 10, (0, 0, 255), -1)

                cv2.imshow('', frame)
                # Continue until the user presses the ESC key
                if cv2.waitKey(1) == 27:
                    break

            # END LOOP MOVER

        # ___________________________________________________________________________Call the tracker
        tracker.predict()
        tracker.update(full_frame_mover)

        # ___________________________________________________________________________DRAW TRACK RECTANGLE
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue

            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track.track_id), (int(bbox[0]), int(bbox[1]) + 30), 0, 5e-3 * 200, (0, 255, 0), 3)

            dot = int(int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) / 2)), int(bbox[3] - 10)
            cv2.circle(frame, dot, 10, (0, 0, 255), -1)

        # ___________________________________________________________________________GET POINTS From click
        if cv2.waitKey(1) == ord('p'):
            points = get_lines.run(frame, multi=True)
            print(points)
        if points:
            for line in points:
                cv2.line(frame, line[0:2], line[2:4], (0, 255, 255), 2)  # draw line

        cv2.imshow('', frame)

        print('process time :', time.time() - tpro)
        tpro = time.time()

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index)+' ')
            if len(boxs) != 0:
                for i in range(0,len(boxs)):
                    list_file.write(str(boxs[i][0]) + ' '+str(boxs[i][1]) + ' '+str(boxs[i][2]) + ' '+str(boxs[i][3]) + ' ')
            list_file.write('\n')
            

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
Example #26
def _de(model, data):  # def line reconstructed from the calls in tuner() below
  "DE"
  DE = diffEvol(model, data)
#   set_trace()
  res = sorted([k for k in DE.DE()],
               key = lambda F: F[-1])[-1]
  return res

def tuner(model, data):
  if model == rforest:
    return _de(tuneRF, data)
  elif model == CART:
    return _de(tuneCART, data)

if __name__ == '__main__':
  from timeit import time
  data = explore(dir = '../Data/')[0][-1]  # Only training data to tune.
  for m in [tuneRF, tuneCART]:
    t = time.time()
    mdl = m(data)
#   _test(data)
    tunings = _de(m, data)
    print tunings
    print mdl.depen(tunings)
    print time.time() - t
#   print _de()
#  print main()
#  import sk; xtile = sk.xtile
#  print xtile(G)

 # main(dir = 'Data/')
示例#27
0
文件: omni.py 项目: guodj/work
                    'V':'$V_SW$ (km/s)', 'Vx':'$V_x$ (km/s)',
                    'Vy':'$V_y$ (km/s)', 'Vz':'$V_z$ (km/s)', # flow speed
                    'ProDen':'Proton Density ($N/cm^3$)',
                    'ProTmp':'Proton Temperature (k)',
                    'SYMD': 'SYM/D (nT)','SYMH': 'SYM/H (nT)',
                    'ASYD': 'ASY/D (nT)','ASYH': 'ASY/H (nT)',
                    'Kp':'Kp', 'R':'R','DST':'DST','ap':'ap','f107':'f10.7'}
    omni_data = get_omni(bdate,edate,variables,res)
    omni_data = pd.DataFrame(omni_data)
    for k00, k0 in enumerate(variables):
        plt.sca(ax[k00]) if len(variables)>1 else plt.sca(ax)
        plt.plot(omni_data.index, omni_data[k0], **kwargs)
        plt.ylabel(variablename[k0])
    return
#END
#------------------------------------------------------------------------------
# TEST
if __name__ == '__main__':
    # Test plot_omni
    #    fig,ax = plt.subplots(1,1,sharex=True)
    #    plot_omni(ax,'2010-1-1','2010-1-31',['Bx'],res='5minute')
    # Test get_omni and get_omni2
    from timeit import time
    import matplotlib.pyplot as plt
    begintime = time.time()
    omni1 = get_omni('2010-1-1', '2010-12-31', ['AE'])
    endtime = time.time()
    print(endtime-begintime)
    plt.plot(omni1['AE'])

示例#28
0
def main(yolo):

    start = time.time()
    #Definition of the parameters
    max_cosine_distance = 0.5  # cosine-distance matching threshold
    nn_budget = None
    nms_max_overlap = 0.3  # non-maximum suppression threshold

    counter = []
    #deep_sort
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True
    #video_path = "./output/output.avi"
    video_capture = cv2.VideoCapture(args["input"])

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter(
            './output/' + args["input"][43:57] + "_" + args["class"] +
            '_output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0

    while True:

        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break
        t1 = time.time()

        # image = Image.fromarray(frame)
        image = Image.fromarray(frame[..., ::-1])  #bgr to rgb
        boxs, class_names = yolo.detect_image(image)
        features = encoder(frame, boxs)
        # Detection confidence is fixed to 1.0 here.
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        i = int(0)
        indexIDs = []
        c = []
        boxes = []
        for det in detections:
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            #boxes.append([track[0], track[1], track[2], track[3]])
            indexIDs.append(int(track.track_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]

            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (color), 3)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1] - 50)), 0, 5e-3 * 150,
                        (color), 2)
            if len(class_names) > 0:
                class_name = class_names[0]
                cv2.putText(frame, str(class_names[0]),
                            (int(bbox[0]), int(bbox[1] - 20)), 0, 5e-3 * 150,
                            (color), 2)

            i += 1
            #bbox_center_point(x,y)
            center = (int(
                ((bbox[0]) + (bbox[2])) / 2), int(((bbox[1]) + (bbox[3])) / 2))
            #track_id[center]
            pts[track.track_id].append(center)
            thickness = 5
            #center point
            cv2.circle(frame, (center), 1, color, thickness)

            #draw motion path
            for j in range(1, len(pts[track.track_id])):
                if pts[track.track_id][j - 1] is None or pts[
                        track.track_id][j] is None:
                    continue
                thickness = int(np.sqrt(64 / float(j + 1)) * 2)
                cv2.line(frame, (pts[track.track_id][j - 1]),
                         (pts[track.track_id][j]), (color), thickness)
                #cv2.putText(frame, str(class_names[j]),(int(bbox[0]), int(bbox[1] -20)),0, 5e-3 * 150, (255,255,255),2)

        count = len(set(counter))
        cv2.putText(frame, "Total Object Counter: " + str(count),
                    (int(20), int(120)), 0, 5e-3 * 200, (0, 255, 0), 2)
        cv2.putText(frame, "Current Object Counter: " + str(i),
                    (int(20), int(80)), 0, 5e-3 * 200, (0, 255, 0), 2)
        cv2.putText(frame, "FPS: %f" % (fps), (int(20), int(40)), 0,
                    5e-3 * 200, (0, 255, 0), 3)
        cv2.namedWindow("YOLO3_Deep_SORT", 0)
        cv2.resizeWindow('YOLO3_Deep_SORT', 1024, 768)
        cv2.imshow('YOLO3_Deep_SORT', frame)

        if writeVideo_flag:
            #save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')
        fps = (fps + (1. / (time.time() - t1))) / 2
        #print(set(counter))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print(" ")
    print("[Finish]")
    end = time.time()

    if len(pts[track.track_id]) > 0:
        print(args["input"][43:57] + ": " + str(count) + " " +
              str(class_name) + ' Found')

    else:
        print("[None Found]")

    video_capture.release()

    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
示例#29
0
def main(yolo,read_type):

   # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    
   # deep_sort 
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename,batch_size=1)
    
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    #writeVideo_flag = True

    # generate a video object
    video_dir='./model_data/demo2.wmv'
    video=video_open(read_type,video_dir)
    video_capture = video.generate_video()
    fps=0
    while True:

        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
                break
        t1 = time.time()

        # 1. YOLOv3 detects objects and returns their locations and class info
        # 2. Track each object's trajectory across frames

        # 1. YOLOv3 object detection
        image = Image.fromarray(frame)
        time3=time.time()
        boxs = yolo.detect_image(image)
        time4=time.time()
        print('detection cost:', time4 - time3)
       # print("box_num",len(boxs))
        time3=time.time()
        features = encoder(frame,boxs)
        
        # Detection confidence is fixed to 1.0 here.
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
        
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]
        time4=time.time()
        print('feature extraction cost:', time4 - time3)
        # Call the tracker
        # 2. Track each object's trajectory; Kalman filtering refines the positions
        tracker.predict()
        tracker.update(detections)
        
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
            cv2.putText(frame, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)

        for det in detections:
            bbox = det.to_tlbr()
            cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,0,0), 2)
            
        cv2.imshow('', frame)

        fps  = ( fps + (1./(time.time()-t1)) ) / 2
        print("fps= %f"%(fps))
        
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()

    cv2.destroyAllWindows()
示例#30
0
def main(yolo):

    start = time.time()
    max_cosine_distance = 0.5
    nn_budget = None
    nms_max_overlap = 0.3

    counter = []
    #deep_sort
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    find_objects = ['person']
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True
    video_capture = cv2.VideoCapture(args["input"])

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('./output/output.avi', fourcc, 15, (w, h))
        list_file = open('detection_rslt.txt', 'w')
        frame_index = -1

    fps = 0.0

    while True:

        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            break
        t1 = time.time()

        classIDs = []
        #image = Image.fromarray(frame)
        image = Image.fromarray(frame[..., ::-1])  #bgr to rgb
        boxs, confidence, class_names = yolo.detect_image(image)
        features = encoder(frame, boxs)
        # Detection confidence is fixed to 1.0 here.
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        i = int(0)
        indexIDs = []
        c = []
        boxes = []
        center2 = []
        co_info = []
        x_l = []
        y_l = []
        s_close_pair = []
        for det in detections:
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            #print(class_names)
            #print(class_names[p])

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            #boxes.append([track[0], track[1], track[2], track[3]])
            indexIDs.append(int(track.track_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]
            #print(frame_index)
            list_file.write(str(frame_index) + ',')
            list_file.write(str(track.track_id) + ',')
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (color), 3)
            b0 = str(bbox[0])
            b1 = str(bbox[1])
            b2 = str(bbox[2] - bbox[0])
            b3 = str(bbox[3] - bbox[1])

            list_file.write(
                str(b0) + ',' + str(b1) + ',' + str(b2) + ',' + str(b3))
            #print(str(track.track_id))
            list_file.write('\n')
            #list_file.write(str(track.track_id)+',')
            cv2.putText(frame, "ID:" + str(track.track_id),
                        (int(bbox[0]), int(bbox[1] - 50)), 0, 5e-3 * 150,
                        (color), 2)
            if len(class_names) > 0:
                class_name = class_names[0]
                cv2.putText(frame, str(class_names[0]),
                            (int(bbox[0]), int(bbox[1] - 20)), 0, 5e-3 * 150,
                            (color), 2)

            i += 1
            #bbox_center_point(x,y)
            center = (int(
                ((bbox[0]) + (bbox[2])) / 2), int(((bbox[1]) + (bbox[3])) / 2))
            #track_id[center]
            pts[track.track_id].append(center)
            thickness = 5
            # draw distance line
            (w, h) = (bbox[2], bbox[3])
            center2.append(center)
            co_info.append([w, h, center2])
            #print(center2)

            # calculateDistance: use fresh loop variables here, since `i`
            # already counts tracks and `j` is reused for the motion path below
            if len(center2) > 2:
                for m in range(len(center2)):
                    for n in range(m + 1, len(center2)):
                        #g = isclose(co_info[m],co_info[n])
                        #D = dist.euclidean((center2[m]), (center2[n]))
                        x1 = center2[m][0]
                        y1 = center2[m][1]
                        x2 = center2[n][0]
                        y2 = center2[n][1]
                        dis = calculateDistance(x1, y1, x2, y2)

                        if dis < 200:
                            #print(dis)
                            cv2.line(frame, (center2[m]), (center2[n]),
                                     (0, 128, 255), 2)

                        if dis < 100:
                            #x_l.append(center2[m])
                            cv2.line(frame, (center2[m]), (center2[n]),
                                     (0, 0, 255), 5)
                            #cv2.putText(frame, "KEEP DISTANCE",(int(960), int(1060)),0, 5e-3 * 200, (0,0,255),2)

            else:
                pass

            #center point
            cv2.circle(frame, (center), 1, color, thickness)

            # draw motion path
            for j in range(1, len(pts[track.track_id])):
                if pts[track.track_id][j - 1] is None or pts[
                        track.track_id][j] is None:
                    continue
                thickness = int(np.sqrt(64 / float(j + 1)) * 2)
                #cv2.line(frame,(pts[track.track_id][j-1]), (pts[track.track_id][j]),(color),thickness)

        count = len(set(counter))
        cv2.putText(frame, "Total Pedestrian Counter: " + str(count),
                    (int(20), int(120)), 0, 5e-3 * 200, (0, 255, 0), 2)
        cv2.putText(frame, "Current Pedestrian Counter: " + str(i),
                    (int(20), int(80)), 0, 5e-3 * 200, (0, 255, 0), 2)
        cv2.putText(frame, "FPS: %f" % (fps * 2), (int(20), int(40)), 0,
                    5e-3 * 200, (0, 255, 0), 3)
        cv2.namedWindow("YOLO3_Deep_SORT", 0)
        cv2.resizeWindow('YOLO3_Deep_SORT', 1024, 768)
        cv2.imshow('YOLO3_Deep_SORT', frame)

        if writeVideo_flag:
            # save a frame (once; the original wrote every frame twice)
            out.write(frame)
            frame_index = frame_index + 1

        fps = (fps + (1. / (time.time() - t1))) / 2

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print(" ")
    print("[Finish]")
    end = time.time()

    if len(pts[track.track_id]) > 0:
        print(args["input"][43:57] + ": " + str(count) + " " +
              str(class_name) + ' Found')

    else:
        print("[None Found]")
    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
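
The proximity check above calls a calculateDistance helper that this snippet does not define. A plausible definition, assuming plain Euclidean pixel distance between bounding-box centers:

import math

def calculateDistance(x1, y1, x2, y2):
    # Euclidean pixel distance between two bbox centers (assumed behavior).
    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
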
示例#31
0
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True

    video_capture = cv2.VideoCapture('top_view1.avi')

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output1.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0
    fig = plt.figure()
    count = 0
    x_list = []
    y_list = []
    # ax1 = fig.add_subplot(1, 1, 1)
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if ret != True:
            print('NO VIDEO FOUND')
            break

        t1 = time.time()

        # image = Image.fromarray(frame)
        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxs = yolo.detect_image(image)
        # print("box_co-ordinate = ", (boxs))

        # for i in boxs:
        #     print(i[0][0])

        features = encoder(frame, boxs)

        # Detection confidence is fixed to 1.0 here.
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                        (0, 255, 0), 2)

        for det in detections:

            bbox = det.to_tlbr()

            # print((type(bbox)))
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            # print("The co-ordinates are:", int(bbox[0]), int(bbox[1]))

        try:

            for i in boxs:
                x = (i[0] + i[2]) / 2
                y = (i[1] + i[3]) / 2
                count += 1
                x_list.append(x)
                y_list.append(y)
                if count == 1:
                    points = plt.scatter(x_list, y_list)
                elif count > 1:
                    print('x:', x_list, 'y:', y_list)
                    points.remove()
                    points = plt.scatter(x_list, y_list)
                    # plt.pause(0.9)
            x_list.clear()
            y_list.clear()

        except Exception:
            continue

        # redraw the canvas
        fig.canvas.draw()

        # convert canvas to image
        # np.fromstring is deprecated; frombuffer reads the canvas bytes directly
        img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
        img = img.reshape(fig.canvas.get_width_height()[::-1] + (3, ))

        # img is rgb, convert to opencv's default bgr
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        # display image with opencv or any operation you like
        cv2.imshow("plot", img)

        cv2.imshow('', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
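
Removing and re-creating the scatter on every frame works, but it churns Matplotlib artists. When only the point positions change, updating the existing scatter in place is usually cleaner; a minimal sketch (figure setup and axis limits are assumptions):

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
scat = ax.scatter([], [])
ax.set_xlim(0, 640)  # assumed frame width
ax.set_ylim(0, 480)  # assumed frame height

def update_centers(centers):
    # centers: list of (x, y) detection midpoints for the current frame
    scat.set_offsets(np.array(centers, dtype=float).reshape(-1, 2))
    fig.canvas.draw()
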
示例#32
0
def dealVideoWithYoloAndDeepSort(yolo, video_list):
    # Definition of the parameters

    for video_info in video_list:
        # threshold for this monitoring location
        address = video_info['address']
        video_path = video_info['video_path']
        addr = Address.objects.filter(
            address=address).first()  # video_info arrives as a string; eval turns it into a dict
        threshold = addr.threshold
        VID = -1

        max_cosine_distance = 0.3
        nn_budget = None
        nms_max_overlap = 1.0

        # deep_sort
        model_filename = 'edgeapp/model_data/mars-small128.pb'
        encoder = gdet.create_box_encoder(model_filename, batch_size=1)

        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        tracker = Tracker(metric)

        writeVideo_flag = True

        # Video path; if 0, capture from the webcam

        video_capture = cv2.VideoCapture(video_path)

        if writeVideo_flag:
            # Define the codec and create VideoWriter object
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))

            out_video_fps = 30
            # AVI output
            # fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            # out = cv2.VideoWriter('output.avi', fourcc, out_video_fps, (w, h))

            # MP4 output; this codec is NOT playable by the HTML5 video tag
            # fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            # out = cv2.VideoWriter('output.mp4', fourcc, out_video_fps, (w, h))

            # MP4 output; this codec IS playable by the HTML5 video tag
            fourcc = cv2.VideoWriter_fourcc(*'avc1')
            out = cv2.VideoWriter(
                'edgeapp/Result_Video/{}_output.mp4'.format(address), fourcc,
                out_video_fps, (w, h))

            list_file = open('detection.txt', 'w')
            frame_index = -1

        fps = 0.0
        while True:
            ret, frame = video_capture.read()  # frame shape 640*480*3
            if ret != True:
                break
            t1 = time.time()

            # image = Image.fromarray(frame)
            image = Image.fromarray(frame[..., ::-1])  # bgr to rgb

            boxs = yolo.detect_image(image)

            # count people
            person_count = len(boxs)
            print("People in frame: {}".format(person_count))

            ## HTTP business logic
            if (person_count >= threshold) and (VID == -1):
                # ask the web server to create a new anomaly-video record
                print("------creating anomaly-video record")
                res = requests.get("http://127.0.0.1:8080/push_new/",
                                   params={
                                       "vid": VID,
                                       "address": address,
                                       "number": person_count
                                   })
                res_dic = eval(res.text)
                VID = res_dic['vid']
            if VID != -1:
                # keep pushing the current head count for this location to the web server
                print("------pushing head count")
                requests.get("http://127.0.0.1:8080/deal_new/",
                             params={
                                 "vid": VID,
                                 "address": address,
                                 "number": person_count
                             })

            features = encoder(frame, boxs)

            # Detection confidence is fixed to 1.0 here.
            detections = [
                Detection(bbox, 1.0, feature)
                for bbox, feature in zip(boxs, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                            (0, 255, 0), 2)

            for det in detections:
                bbox = det.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)

            # optionally display the annotated video in real time
            # cv2.imshow('', frame)

            if writeVideo_flag:
                # save a frame
                out.write(frame)
                frame_index = frame_index + 1
                list_file.write(str(frame_index) + ' ')
                if len(boxs) != 0:
                    for i in range(0, len(boxs)):
                        list_file.write(
                            str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                            str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
                list_file.write('\n')

            fps = (fps + (1. / (time.time() - t1))) / 2
            print("fps= %f" % (fps))

            # Press Q to stop!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        video_capture.release()
        if writeVideo_flag:
            out.release()
            list_file.close()
        cv2.destroyAllWindows()
        # upload the video
        send_video = open('edgeapp/Result_Video/{}_output.mp4'.format(address),
                          'rb')
        files = {'file': send_video}
        res = requests.post("http://127.0.0.1:8080/store_video/",
                            files=files,
                            data={"vid": VID})
        # close the file
        send_video.close()

    # destroy the yolo object
    del yolo
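
The VID handshake above is a two-state alert protocol: create a record on the web server the first time the head count crosses the threshold, then keep pushing updates while the alert is active. Distilled into one function (endpoints and parameter names taken from the code above; -1 means "no active alert"):

import requests

def push_count(vid, address, person_count, threshold,
               base="http://127.0.0.1:8080"):
    # Create the anomaly-video record exactly once per alert episode.
    if person_count >= threshold and vid == -1:
        res = requests.get(base + "/push_new/",
                           params={"vid": vid, "address": address,
                                   "number": person_count})
        vid = eval(res.text)['vid']  # the server replies with a dict-like string
    # While an alert is active, keep the server's head count current.
    if vid != -1:
        requests.get(base + "/deal_new/",
                     params={"vid": vid, "address": address,
                             "number": person_count})
    return vid
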
示例#33
0
def main():

    t0 = time.time()

    problem = setup()

    ## Base Input Values
    output = problem.objective()
    print output
    Plot_Mission.plot_mission(problem, -1)

    # # Uncomment to view contours of the design space
    # variable_sweep(problem)

    print ' '
    print ' Initial Guess Results '
    print ' '
    print 'Fuel Burn   = ', float(problem.summary.base_mission_fuelburn)
    print 'Cruise Fuel = ', float(problem.summary.cruise_fuel)
    print 'Block  Fuel = ', float(problem.summary.block_fuel)
    print 'MTOW        = ', float(problem.summary.MTOW)
    print 'BOW         = ', float(problem.summary.BOW)
    print 'TOFL        = ', float(problem.summary.takeoff_field_length)
    print 'GRAD        = ', float(problem.summary.second_segment_climb_gradient_takeoff)
    print 'Cruise Alt  = ', float(problem.summary.cruise_altitude)
    print 'Design Ran  = ', float(problem.summary.design_range)
    print 'Cruise Ran  = ', float(problem.summary.cruise_range)
    print 'Total Ran   = ', float(problem.summary.total_range)
    print 'Time To Cli = ', float(problem.summary.time_to_climb_value)
    print 'TOW HH      = ', float(problem.summary.TOW_HH)
    print 'Fuel HH     = ', float(problem.summary.FUEL_HH)
    print ' '
    print 'Constraints = ', problem.all_constraints()
    print ' '
    last_inputs = problem.last_inputs[:, 1]
    print 'S           = ', last_inputs[0]
    print 'AR          = ', last_inputs[1]
    print 't/c         = ', last_inputs[2]
    print 'Sweep Ang   = ', last_inputs[3]
    # ------------------------------------------------------------------
    # Pareto

    fuelburn = []
    allconstraints = []
    MTOW = []
    finalvalue = []
    grad = []
    tofl = []

    # betas = [1., 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.]
    betas = [0.5, 0.25, 0.]
    # betas = [0.]
    fid = open('Pareto_results.txt', 'w')  # Open output file
    fid.write(' Pareto results \n')
    fid.close()

    for i, beta in enumerate(betas):
        fid = open('Pareto_results.txt', 'ab')  # Open output file
        fid.write('Pareto Frontier. Run number: ' + str(i) + ' \n')
        print('Pareto Frontier. Run number: ' + str(i))

        # updating Beta value
        design_vars = problem.optimization_problem.inputs[:, 1]
        design_vars[-1] = beta
        design_vars[0] = 75.
        design_vars[1] = 8.0
        design_vars[2] = 0.11
        design_vars[3] = 20.
        problem.optimization_problem.inputs[:, 1] = design_vars

        bounds = problem.optimization_problem.inputs[:, 2]
        bounds[-1] = list(bounds[-1])
        bounds[-1][0] = beta
        bounds[-1][1] = beta
        bounds[-1] = tuple(bounds[-1])
        problem.optimization_problem.inputs[:, 2] = bounds

        output = scipy_setup.SciPy_Solve(problem, solver='SLSQP')

        finalvalue.append(output)

        print output
        print ' '
        print ' Final Results '
        print ' '
        print 'Fuel Burn   = ', float(problem.summary.base_mission_fuelburn)
        print 'Cruise Fuel = ', float(problem.summary.cruise_fuel)
        print 'Block  Fuel = ', float(problem.summary.block_fuel)
        print 'MTOW        = ', float(problem.summary.MTOW)
        print 'BOW         = ', float(problem.summary.BOW)
        print 'TOFL        = ', float(problem.summary.takeoff_field_length)
        print 'GRAD        = ', float(problem.summary.second_segment_climb_gradient_takeoff)
        print 'Cruise Alt  = ', float(problem.summary.cruise_altitude)
        print 'Design Ran  = ', float(problem.summary.design_range)
        print 'Cruise Ran  = ', float(problem.summary.cruise_range)
        print 'Total Ran   = ', float(problem.summary.total_range)
        print 'Time To Cli = ', float(problem.summary.time_to_climb_value)
        print 'TOW HH      = ', float(problem.summary.TOW_HH)
        print 'Fuel HH     = ', float(problem.summary.FUEL_HH)
        print ' '
        print 'Constraints = ', problem.all_constraints()
        print ' '
        last_inputs = finalvalue[-1]
        print 'S           = ', last_inputs[0]
        print 'AR          = ', last_inputs[1]
        print 't/c         = ', last_inputs[2]
        print 'Sweep Ang   = ', last_inputs[3]
        # -----------------------------------------
        Plot_Mission.plot_mission(problem, i)


        fuelburn.append(problem.summary.base_mission_fuelburn)
        allconstraints.append(problem.all_constraints())
        grad.append(problem.summary.second_segment_climb_gradient_takeoff)
        tofl.append(problem.summary.takeoff_field_length)
        MTOW.append(problem.summary.MTOW)

        fid.write(str(fuelburn[-1])+' \n')
        fid.write(str(grad[-1]) + ' \n')
        fid.write(str(tofl[-1]) + ' \n')
        fid.write(str(MTOW[-1]) + ' \n')
        fid.write(str(allconstraints[-1]) + ' \n')
        fid.write(str(finalvalue[-1]) + ' \n')
        fid.write('\n \n')
        fid.close()

    fid = open('Pareto_results.txt', 'ab')  # Open output file
    elapsed = time.time() - t0

    fid.write('Total run time: ' + str(elapsed))
    print('Total run time: ' + str(elapsed))

    fid.close()

    return
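
The beta sweep above pins the trade-off variable by collapsing its bounds to a single point before each SLSQP solve, so the optimizer treats it as a constant. The same trick in plain scipy.optimize (a generic sketch, not SUAVE-specific):

import numpy as np
from scipy.optimize import minimize

def solve_with_fixed_beta(objective, x0, bounds, beta):
    # Freeze the last design variable at beta via degenerate bounds.
    bounds = list(bounds)
    bounds[-1] = (beta, beta)
    x0 = np.array(x0, dtype=float)
    x0[-1] = beta
    return minimize(objective, x0, method='SLSQP', bounds=bounds)

# Example: a quadratic with the second variable fixed at 0.25
res = solve_with_fixed_beta(lambda x: (x[0] - 1.0) ** 2 + x[1] ** 2,
                            x0=[0.0, 0.5], bounds=[(-5, 5), (0, 1)],
                            beta=0.25)
print(res.x)  # approximately [1.0, 0.25]
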
示例#34
0
def main(yolo):

    print('Using {} model'.format(yolo))
       
   # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 0.4
    
   # deep_sort 
    model_filename = 'model_data/models/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename,batch_size=1) # use to get feature
    
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric, max_age=100)

    output_frames = []
    output_rectanger = []
    output_areas = []
    output_wh_ratio = []

    is_vis = True
    out_dir = 'videos/output/'
    print('The output folder is',out_dir)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    all_frames = []
    for video in args.videos:
        loadvideo = LoadVideo(video)
        video_capture, frame_rate, w, h = loadvideo.get_VideoLabels()
        while True:
            ret, frame = video_capture.read() 
            if ret != True:
                video_capture.release()
                break
            all_frames.append(frame)

    frame_nums = len(all_frames)
    tracking_path = out_dir+'tracking'+'.avi'
    combined_path = out_dir+'allVideos'+'.avi'
    if is_vis:
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter(tracking_path, fourcc, frame_rate, (w, h))
        out2 = cv2.VideoWriter(combined_path, fourcc, frame_rate, (w, h))
        #Combine all videos
        for frame in all_frames:
            out2.write(frame)
        out2.release()
        
    #Initialize tracking file
    filename = out_dir+'/tracking.txt'
    open(filename, 'w')
    
    fps = 0.0
    frame_cnt = 0
    t1 = time.time()
    
    track_cnt = dict()
    images_by_id = dict()
    ids_per_frame = []
    for frame in all_frames:
        image = Image.fromarray(frame[...,::-1]) #bgr to rgb
        boxs = yolo.detect_image(image) # n * [topleft_x, topleft_y, w, h]
        features = encoder(frame,boxs) # n * 128
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)] # length = n
        text_scale, text_thickness, line_thickness = get_FrameLabels(frame)

        
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.delete_overlap_box(boxes, nms_max_overlap, scores) #preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices] # length = len(indices)

        # Call the tracker 
        tracker.predict()
        tracker.update(detections)
        tmp_ids = []
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue 
            
            bbox = track.to_tlbr()
            area = (int(bbox[2]) - int(bbox[0])) * (int(bbox[3]) - int(bbox[1]))
            if bbox[0] >= 0 and bbox[1] >= 0 and bbox[3] < h and bbox[2] < w:
                tmp_ids.append(track.track_id)
                if track.track_id not in track_cnt:
                    track_cnt[track.track_id] = [[frame_cnt, int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]), area]]
                    images_by_id[track.track_id] = [frame[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]]
                else:
                    track_cnt[track.track_id].append([frame_cnt, int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]), area])
                    images_by_id[track.track_id].append(frame[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])])
            cv2_addBox(track.track_id,frame,int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]),line_thickness,text_thickness,text_scale)
            write_results(filename,'mot',frame_cnt+1,str(track.track_id),int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]),w,h)
        ids_per_frame.append(set(tmp_ids))

        # save a frame               
        if is_vis:
            out.write(frame)
        t2 = time.time()
        
        frame_cnt += 1
        print(frame_cnt, '/', frame_nums)

    if is_vis:
        out.release()
    print('Tracking finished in {} seconds'.format(int(time.time() - t1)))
    print('Tracked video : {}'.format(tracking_path))
    print('Combined video : {}'.format(combined_path))

    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    reid = REID()
    threshold = 320
    exist_ids = set()
    final_fuse_id = dict()

    print('Total IDs = ',len(images_by_id))
    feats = dict()
    for i in images_by_id:
        print('ID number {} -> Number of frames {}'.format(i, len(images_by_id[i])))
        feats[i] = reid._features(images_by_id[i]) #reid._features(images_by_id[i][:min(len(images_by_id[i]),100)])
    
    ids_per_frame2 = copy.deepcopy(ids_per_frame)
    
    for f in ids_per_frame:
        if f:
            if len(exist_ids) == 0:
                for i in f:
                    final_fuse_id[i] = [i]
                exist_ids = exist_ids | f  # set union ('or' only worked here because exist_ids was empty)
            else:
                new_ids = f-exist_ids
                for nid in new_ids:
                    dis = []
                    if len(images_by_id[nid])<10:
                        exist_ids.add(nid)
                        continue
                    unpickable = []
                    for i in f:
                        for key,item in final_fuse_id.items():
                            if i in item:
                                unpickable += final_fuse_id[key]
                    print('exist_ids {} unpickable {}'.format(exist_ids,unpickable))
                    for oid in (exist_ids-set(unpickable))&set(final_fuse_id.keys()):
                        tmp = np.mean(reid.compute_distance(feats[nid],feats[oid]))
                        print('nid {}, oid {}, tmp {}'.format(nid, oid, tmp))
                        dis.append([oid, tmp])
                    exist_ids.add(nid)
                    if not dis:
                        final_fuse_id[nid] = [nid]
                        continue
                    dis.sort(key=operator.itemgetter(1))
                    if dis[0][1] < threshold:
                        combined_id = dis[0][0]
                        images_by_id[combined_id] += images_by_id[nid]
                        final_fuse_id[combined_id].append(nid)
                    else:
                        final_fuse_id[nid] = [nid]
    print('Final ids and their sub-ids:',final_fuse_id)
    print('MOT took {} seconds'.format(int(time.time() - t1)))
    t2 = time.time()

    # To generate a MOT video for each person, set 'is_vis' to True
    is_vis = False
    if is_vis:
        print('Writing videos for each ID...')
        output_dir = 'videos/output/tracklets/'
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        loadvideo = LoadVideo(combined_path)
        video_capture,frame_rate, w, h = loadvideo.get_VideoLabels()
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        for idx in final_fuse_id:
            tracking_path = os.path.join(output_dir, str(idx)+'.avi')
            out = cv2.VideoWriter(tracking_path, fourcc, frame_rate, (w, h))
            for i in final_fuse_id[idx]:
                for f in track_cnt[i]:
                    video_capture.set(cv2.CAP_PROP_POS_FRAMES, f[0])
                    _, frame = video_capture.read()
                    text_scale, text_thickness, line_thickness = get_FrameLabels(frame)
                    cv2_addBox(idx, frame, f[1], f[2], f[3], f[4], line_thickness, text_thickness, text_scale)
                    out.write(frame)
            out.release()
        video_capture.release()

    # Generate a single video with complete MOT/ReID              
    if args.all:
        loadvideo = LoadVideo(combined_path)
        video_capture, frame_rate, w, h = loadvideo.get_VideoLabels()
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        complete_path = out_dir+'/Complete'+'.avi'
        out = cv2.VideoWriter(complete_path, fourcc, frame_rate, (w, h))
        
        for frame in range(len(all_frames)):
            frame2 = all_frames[frame]
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, frame)
            _, frame2 = video_capture.read()
            for idx in final_fuse_id:
                for i in final_fuse_id[idx]:
                    for f in track_cnt[i]:
                        #print('frame {} f0 {}'.format(frame,f[0]))
                        if frame == f[0]:
                            text_scale, text_thickness, line_thickness = get_FrameLabels(frame2)
                            cv2_addBox(idx, frame2, f[1], f[2], f[3], f[4], line_thickness, text_thickness, text_scale)
            out.write(frame2)
        out.release()
        video_capture.release()

    os.remove(combined_path)
    print('\nWriting videos took {} seconds'.format(int(time.time() - t2)))
    if args.all:
        # complete_path only exists when the combined video was generated
        print('Final video at {}'.format(complete_path))
    print('Total: {} seconds'.format(int(time.time() - t1)))
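
The ReID pass above reduces to one rule: a new track ID is merged into the existing identity with the smallest mean feature distance, provided that distance is under the threshold; otherwise it starts a new identity. The rule without the frame bookkeeping (the distance argument stands in for reid.compute_distance):

import numpy as np

def fuse_id(new_feats, id_feats, distance, threshold):
    # id_feats: {track_id: stacked feature vectors for that identity}.
    # Returns the id to merge into, or None to start a new identity.
    scores = [(oid, float(np.mean(distance(new_feats, feats))))
              for oid, feats in id_feats.items()]
    if not scores:
        return None
    oid, best = min(scores, key=lambda s: s[1])
    return oid if best < threshold else None
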
示例#35
0
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    
    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    tracking = True
    writeVideo_flag = False
    asyncVideo_flag = False
    webcamera_flag = True
    ipcamera_flag = False
    udp_flag = True

    file_path = '/workspace/data/C0133_v4.mp4'
    if asyncVideo_flag :
        video_capture = VideoCaptureAsync(file_path)
    elif ipcamera_flag :
        video_capture = cv2.VideoCapture('rtsp://*****:*****@192.168.2.201/ONVIF/MediaInput?profile=def_profile1')
    elif webcamera_flag :
        video_capture = cv2.VideoCapture(0)
    else:
        video_capture = cv2.VideoCapture(file_path)
        
    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
        frame_index = -1

    if udp_flag:
        HOST = ''
        PORT = 5000
        address = '192.168.2.255'
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        sock.bind((HOST, PORT))


    fps = 0.0
    fps_imutils = imutils.video.FPS().start()
    
    savetime = 0

    while True:
        nowtime = datetime.datetime.now().isoformat()
        ret, frame = video_capture.read()  # frame shape 640*480*3
        t1 = time.time()

        if time.time() - savetime >= 30: 
            print('save data') 
            cv2.imwrite("/workspace/images/image.png", frame)
            savetime = time.time()
        image = Image.fromarray(frame[...,::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        if tracking:
            features = encoder(frame, boxes)

            detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
                        zip(boxes, confidence, classes, features)]
        else:
            detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in
                        zip(boxes, confidence, classes)]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        if tracking:
            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)
                # socket
                message = str(nowtime + "," + str(track.track_id) + "," + str(int(bbox[0])) + "," + str(int(bbox[1])) + "," + str(int(bbox[2])) + "," + str(int(bbox[3])))
                bmessage = message.encode('utf-8')
                print(type(bmessage))
                if udp_flag:
                    sock.sendto(message.encode('utf-8'), (address, PORT))


        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2) + "%"
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            if len(classes) > 0:
                cls = det.cls
                cv2.putText(frame, str(cls) + " " + score, (int(bbox[0]), int(bbox[3])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)

        #cv2.imshow('', frame)

        if writeVideo_flag: # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1./(time.time()-t1))) / 2
            print("FPS = %f"%(fps))
        
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
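
Each confirmed track above is serialized as a comma-separated UTF-8 line and broadcast over UDP. A matching receiver, under the same assumptions (port 5000 and the sender's field order):

from socket import socket, AF_INET, SOCK_DGRAM

sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(('', 5000))  # the port the tracking loop broadcasts to

while True:
    data, addr = sock.recvfrom(4096)
    # timestamp, track id, then the tlbr box, as sent above
    timestamp, track_id, x1, y1, x2, y2 = data.decode('utf-8').split(',')
    print(addr, track_id, (x1, y1, x2, y2))
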
示例#36
0
class dataType:
    Dixon = 'Dixon'
    Rao = 'Rao'


CELLTYPE = cellType.K526
DataType = dataType.Rao

if CELLTYPE == 'hIMR90' or CELLTYPE == 'hES' or CELLTYPE == 'K526':
    NUM_OF_CHRMS = 22
elif CELLTYPE == 'mES' or CELLTYPE == 'mCO':
    NUM_OF_CHRMS = 20
for CHRM in range(1, NUM_OF_CHRMS + 1):
    if DataType == dataType.Rao:
        print('Loading Chromosome ' + str(CHRM))
        st = time.time()
        if PANDAS_INSTALLED:
            if RESOLUTION < 1000000:
                name = str(int(RESOLUTION / 1000)) + 'kb'
            else:
                name = str(int(RESOLUTION / 1000000)) + 'mb'
            chr1Data = pd.read_csv(
                os.path.abspath(os.sep) + 'Dataset/' + CELLTYPE + '/' + name +
                '_resolution_intrachromosomal/chr' + str(CHRM) +
                '/MAPQGE30/chr' + str(CHRM) + '_' + name + '.RAWobserved',
                sep='\t',
                header=None)
            chr1Data = chr1Data.values
            chr1Data[:, 0:2] = np.floor(chr1Data[:, 0:2] / RESOLUTION)
            knorm = pd.read_csv(
                os.path.abspath(os.sep) + 'Dataset/' + CELLTYPE + '/' + name +
示例#37
0
from random import randrange
from selection_sort import selection_sort
from bubble_sort import bubble_sort
from timeit import time

# Build the list
# Starting list to be sorted
mylist = [randrange(1000) for _ in range(10000)]

# Time selection sort....
selection_sort_time = selection_sort(mylist.copy())

# Time bubble sort....
bubble_sort_time = bubble_sort(mylist.copy())

# Time Python's built-in sort....
newlist = mylist.copy()

start_time = time.time()
newlist.sort()
stop_time = time.time()

default_sort_time = stop_time - start_time

# Save the results to a file
result_file = open("results.csv", "a")
result_file.write(
    f"{len(mylist)},{selection_sort_time},{bubble_sort_time},{default_sort_time}\n"
)
result_file.close()
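
Note that "from timeit import time" only re-exports the stdlib time module, and single time.time() deltas are noisy for runs this short. timeit itself repeats the statement and lets you take the best batch; a sketch for the built-in sort:

import timeit
from random import randrange

mylist = [randrange(1000) for _ in range(10000)]

# Best of 5 batches of 10 runs each, always sorting a fresh copy via sorted().
best = min(timeit.repeat('sorted(mylist)', globals={'mylist': mylist},
                         number=10, repeat=5)) / 10
print('default sort: %.6fs per run' % best)
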
示例#38
0
    def findTADs(intgMAT):
        st = time.time()
        intgMAT2 = np.lib.pad(intgMAT, ((maxL, maxL), (maxL, maxL)),
                              'constant',
                              constant_values=0)
        # find potential starts/ends of TADs
        det_scores = np.zeros([N, 3], dtype=np.float64)
        det_scores = det_score(intgMAT2, np.arange(N), [W, W, W])
        S = np.sum(det_scores[:, 0]) / N
        det_scores[:, 1] = det_scores[:, 1] / S
        det_scores[:, 2] = det_scores[:, 2] / S

        start_peaks = np.asarray(detect_peaks(det_scores[:, 2], mpd=2),
                                 dtype=np.int64)  #np.median(det_scores[:,1])
        end_peaks = np.asarray(detect_peaks(det_scores[:, 1], mpd=2),
                               dtype=np.int64)  #np.median(det_scores[:,2])

        # we need to know the preceding end for each start
        prev_end = np.zeros(len(start_peaks), dtype=np.int64)
        k_old = 0
        for i in range(len(start_peaks)):
            k = k_old
            for j in range(k, len(end_peaks)):
                if end_peaks[k] <= start_peaks[i]:
                    k = k + 1
                else:
                    break
            if k == 0:
                prev_end[i] = -1
                k_old = 0
            elif end_peaks[k - 1] < start_peaks[0]:
                prev_end[i] = -1
                k_old = k - 1
            else:
                prev_end[i] = k - 1
                k_old = k - 1
        # we need to know the preceding start for each end
        prev_start = np.zeros(len(end_peaks), dtype=np.int64)
        k_old = 0
        for i in range(len(end_peaks)):
            k = k_old
            for j in range(k, len(start_peaks)):
                if start_peaks[k] <= end_peaks[i]:
                    k = k + 1
                else:
                    break
            if k == 0:
                prev_start[i] = -1
                k_old = 0
            else:
                prev_start[i] = k - 1
                k_old = k - 1

        # pre-compute scores for start/end combinations
        M1 = len(start_peaks)
        M2 = len(end_peaks)

        rep = []
        indx = []
        for i in range(M1):
            ind = prev_end[i]
            indx.append(min(ind + 1, len(end_peaks)))
            if ind == -1:
                rep.append(M2)
            elif ind == M2:
                rep.append(0)
            else:
                rep.append(M2 - ind - 1)

        long_i = np.repeat(start_peaks, rep)
        long_j = np.zeros(np.sum(rep), dtype=np.int64)
        for i in range(M1):
            start = sum(rep[0:i])
            long_j[start:start + rep[i]] = end_peaks[indx[i]:len(end_peaks)]

        long_l = long_j - long_i + 1
        temp_s = score(
            intgMAT2, long_i, long_l
        )  #long_i is the center of a domain; long_l is the length of that domain

        # build a dense representation of the 'scores' but much smaller than N*N (for higher resolutions)
        i2I = np.zeros(N, dtype=np.int64)
        for i in range(len(start_peaks)):
            i2I[start_peaks[i]] = i
        j2J = np.zeros(N, dtype=np.int64)
        for j in range(len(end_peaks)):
            j2J[end_peaks[j]] = j

        scores = np.zeros([len(start_peaks), len(end_peaks)], dtype=np.float64)
        scores[i2I[long_i], j2J[long_j]] = temp_s

        # Dynamic Programming applies on potential start/end TAD boundaries we have found above
        T = np.zeros(len(end_peaks), dtype=np.float64)
        backT = np.zeros(
            len(end_peaks), dtype=np.int64
        )  # shows the index of the start point (out of all start points) for each end point
        k_old = 0
        for j in range(len(end_peaks)):
            if prev_start[j] == -1:
                backT[j] = -1
                continue
            k = k_old
            kk = np.arange(int(prev_start[j]) + 1)
            vals = parDP(scores, i2I, j2J, T, start_peaks, end_peaks, prev_end,
                         j, kk)
            T[j] = np.max([np.max(vals), T[j - 1]])
            backT[j] = np.argmax(vals)
            k_old = k

        # Extracting TADs based on backT
        counter = len(end_peaks) - 1
        tadCount = 0
        TADx1 = []
        TADx2 = []
        while True:  # backT[counter] != 0 and backT[counter] != -1:

            if backT[counter] == -1:
                break
            elif backT[counter] == 0:
                TADx2.append(end_peaks[counter])
                TADx1.append(start_peaks[0])
                tadCount = tadCount + 1
                break
            elif counter > 0:
                TADx2.append(end_peaks[counter])
                TADx1.append(start_peaks[backT[counter]])
                tadCount = tadCount + 1
                counter = prev_end[backT[counter]]

                if prev_end[backT[counter]] == counter:
                    counter = counter - 1
                if counter == -1:
                    break
            elif counter == 0:
                TADx2.append(end_peaks[counter])
                TADx1.append(start_peaks[backT[counter]])
                tadCount = tadCount + 1
                break
        print('Number of TADs:', tadCount)
        TADx1.sort()
        TADx2.sort()
        TADx1 = (np.asarray(TADx1)).reshape([len(TADx1), 1])
        TADx2 = (np.asarray(TADx2)).reshape([len(TADx2), 1])
        TAD = np.concatenate((TADx1, TADx2), axis=1)
        np.savetxt(CELLTYPE + '_nij_chr' + str(CHRM) + '_' + str(N),
                   TAD,
                   delimiter=' ',
                   fmt='%d')
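
detect_peaks here is an external helper; if it is unavailable, scipy.signal.find_peaks provides the same minimum-separation peak picking (the mpd argument maps to distance). A drop-in sketch under that assumption:

import numpy as np
from scipy.signal import find_peaks

def detect_peaks(x, mpd=1):
    # Indices of local maxima at least mpd samples apart, mirroring
    # the mpd argument of the helper used above.
    peaks, _ = find_peaks(np.asarray(x, dtype=float), distance=mpd)
    return peaks
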
示例#39
0
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 0.4

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename,
                                      batch_size=1)  # use to get feature

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric, max_age=10)

    output_frames = []
    output_rectanger = []
    output_areas = []
    output_wh_ratio = []

    is_vis = True
    videos = ['videos/0701/1_1.mp4']

    all_frames = []
    for video in videos:
        video_capture = cv2.VideoCapture(video)
        w = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_rate = video_capture.get(cv2.CAP_PROP_FPS)
        while True:
            ret, frame = video_capture.read()
            if ret != True:
                video_capture.release()
                break
            all_frames.append(frame)

    frame_nums = len(all_frames)
    if is_vis:
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        output_path = video.split('.')[0] + '_output' + '.avi'
        out = cv2.VideoWriter(output_path, fourcc, frame_rate, (w, h))

    fps = 0.0
    frame_cnt = 0
    t1 = time.time()

    track_cnt = dict()
    images_by_id = dict()
    ids_per_frame = []
    for frame in all_frames:
        image = Image.fromarray(frame[..., ::-1])  #bgr to rgb
        boxs = yolo.detect_image(image)  # n * [topleft_x, topleft_y, w, h]
        features = encoder(frame, boxs)  # n * 128
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]  # length = n

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        #indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        indices = preprocessing.delete_overlap_box(boxes, nms_max_overlap,
                                                   scores)
        detections = [detections[i] for i in indices]  # length = len(indices)

        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        tmp_ids = []
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue

            bbox = track.to_tlbr()
            area = (int(bbox[2]) - int(bbox[0])) * (int(bbox[3]) -
                                                    int(bbox[1]))
            if bbox[0] >= 0 and bbox[1] >= 0 and bbox[3] < h and bbox[2] < w:
                tmp_ids.append(track.track_id)
                if track.track_id not in track_cnt:
                    track_cnt[track.track_id] = [[
                        frame_cnt,
                        int(bbox[0]),
                        int(bbox[1]),
                        int(bbox[2]),
                        int(bbox[3]), area
                    ]]
                    images_by_id[track.track_id] = [
                        frame[int(bbox[1]):int(bbox[3]),
                              int(bbox[0]):int(bbox[2])]
                    ]
                else:
                    track_cnt[track.track_id].append([
                        frame_cnt,
                        int(bbox[0]),
                        int(bbox[1]),
                        int(bbox[2]),
                        int(bbox[3]), area
                    ])
                    images_by_id[track.track_id].append(
                        frame[int(bbox[1]):int(bbox[3]),
                              int(bbox[0]):int(bbox[2])])

            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 2, (0, 255, 0), 2)
        ids_per_frame.append(set(tmp_ids))

        if is_vis:
            # save a frame
            out.write(frame)
        t2 = time.time()

        frame_cnt += 1
        print(frame_cnt, '/', frame_nums)

    if is_vis:
        out.release()

    for i in images_by_id:
        print(i, len(images_by_id[i]))

    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    reid = REID()
    threshold = 250
    exist_ids = set()
    final_fuse_id = dict()
    for f in ids_per_frame:
        if f:
            if len(exist_ids) == 0:
                for i in f:
                    final_fuse_id[i] = [i]
                exist_ids = exist_ids | f  # set union (exist_ids is empty here, so this keeps f)
            else:
                new_ids = f - exist_ids
                for nid in new_ids:
                    dis = []
                    if len(images_by_id[nid]) < 10:
                        exist_ids.add(nid)
                        continue
                    unpickable = []
                    for i in f:
                        for key, item in final_fuse_id.items():
                            if i in item:
                                unpickable += final_fuse_id[key]

                    for oid in (exist_ids - set(unpickable)) & set(
                            final_fuse_id.keys()):
                        tmp = np.mean(
                            reid.get_features(images_by_id[nid],
                                              images_by_id[oid]))
                        print(nid, oid, tmp)
                        dis.append([oid, tmp])
                    exist_ids.add(nid)
                    if not dis:
                        final_fuse_id[nid] = [nid]
                        continue
                    dis.sort(key=operator.itemgetter(1))
                    if dis[0][1] < threshold:
                        combined_id = dis[0][0]
                        images_by_id[combined_id] += images_by_id[nid]
                        final_fuse_id[combined_id].append(nid)
                    else:
                        final_fuse_id[nid] = [nid]
    print(final_fuse_id)

    if is_vis:
        print('writing video...')
        output_dir = 'videos/' + videos[0].split('/')[-1].split('.')[0]
        print(output_dir)
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        video_capture = cv2.VideoCapture(videos[0])
        w = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_rate = video_capture.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        for idx in final_fuse_id:
            output_path = os.path.join(output_dir, str(idx) + '.avi')
            out = cv2.VideoWriter(output_path, fourcc, frame_rate, (w, h))
            for i in final_fuse_id[idx]:
                print(idx, i)
                for f in track_cnt[i]:
                    video_capture.set(cv2.CAP_PROP_POS_FRAMES, f[0])
                    _, frame = video_capture.read()
                    cv2.rectangle(frame, (f[1], f[2]), (f[3], f[4]),
                                  (255, 0, 0), 2)
                    out.write(frame)
            out.release()
        video_capture.release()
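The ID-fusion block above merges Deep SORT track ids by mean appearance distance. A minimal sketch of that rule, omitting the same-frame ("unpickable") exclusion for brevity; distance_fn stands in for reid.get_features and is an assumption, not the repo's API.

def fuse_ids(ids_per_frame, images_by_id, distance_fn, threshold=250, min_images=10):
    exist_ids, final_fuse_id = set(), {}
    for frame_ids in ids_per_frame:          # each entry is a set of track ids
        for nid in frame_ids - exist_ids:    # only ids we have not seen yet
            exist_ids.add(nid)
            if len(images_by_id[nid]) < min_images:
                continue                     # too few crops to compare reliably
            candidates = [(oid, distance_fn(images_by_id[nid], images_by_id[oid]))
                          for oid in final_fuse_id]
            candidates.sort(key=lambda t: t[1])
            if candidates and candidates[0][1] < threshold:
                oid = candidates[0][0]
                final_fuse_id[oid].append(nid)           # merge into existing identity
                images_by_id[oid] += images_by_id[nid]
            else:
                final_fuse_id[nid] = [nid]               # start a new identity
    return final_fuse_id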
Example #40
0
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 0.7

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True
    webcam_flag = False
    resize_flag = True
    resize_size = (800, 450)

    # some links from earthcam https://github.com/Crazycook/Working/blob/master/Webcams.txt    https://www.vlcm3u.com/web-cam-live/
    # video_url = 'https://videos3.earthcam.com/fecnetwork/lacitytours1.flv/chunklist_w683585821.m3u8' # HOLLYWOOD
    # video_url = 'https://videos3.earthcam.com/fecnetwork/9974.flv/chunklist_w1421640637.m3u8' # NYC
    # video_url = 'https://videos3.earthcam.com/fecnetwork/5775.flv/chunklist_w1803081483.m3u8' # NYC 2
    # video_url = 'http://181.1.29.189:60001/cgi-bin/snapshot.cgi?chn=0&u=admin'
    # video_url = 'https://videos-3.earthcam.com/fecnetwork/15559.flv/chunklist_w573709200.m3u8' # NYC 3
    video_url = 'https://hddn01.skylinewebcams.com/live.m3u8?a=97psdt8nv2hsmclta3nuu4di94'

    if webcam_flag:
        video_capture = cv2.VideoCapture(0)
    else:
        video_capture = cv2.VideoCapture()
        video_capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
        video_capture.open(video_url)

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break
        t1 = time.time()

        if resize_flag:
            frame = cv2.resize(frame,
                               resize_size,
                               interpolation=cv2.INTER_AREA)

        # image = Image.fromarray(frame)
        image = Image.fromarray(frame[..., ::-1])  # BGR to RGB
        boxs = yolo.detect_image(image)
        # print("box_num",len(boxs))
        if np.array(boxs).size > 0:
            features = encoder(frame, np.array(boxs)[:, 0:4].tolist())

            class_names = yolo.class_names

            # Detection confidence is fixed to 1.0 here.
            detections = [
                Detection(bbox, 1.0, feature)
                for bbox, feature in zip(boxs, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            #Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, str(track.track_id),
                            (int(bbox[0]), int(bbox[1]) - 10), 0, 5e-3 * 100,
                            (0, 0, 255), 2)

            for det in detections:
                bbox = det.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
                # Detection here stores only bbox / confidence / feature, so
                # det.label and det.score do not exist; show the confidence.
                cv2.putText(
                    frame, "(" + str(round(det.confidence, 2)) + ")",
                    (int(bbox[0]), int(bbox[3])), 0, 5e-3 * 90, (255, 0, 0), 2)
                #cv2.putText(frame, str(int(bbox[0])) + "-" + str(int(bbox[3])) ,(int(bbox[0]), int(bbox[3])),0, 5e-3 * 90, (0,0,255),2)

        cv2.imshow('', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        print("fps= %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
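The fps update in this example, fps = (fps + 1/dt) / 2, is an exponential moving average with alpha = 0.5. A small helper that makes the smoothing explicit (illustrative, not part of the original):

import time

class FpsMeter:
    def __init__(self, alpha=0.5):
        self.alpha = alpha
        self.fps = 0.0
        self._last = time.time()

    def tick(self):
        now = time.time()
        inst = 1.0 / max(now - self._last, 1e-6)  # instantaneous fps
        self._last = now
        # exponential moving average; alpha = 0.5 matches (fps + inst) / 2
        self.fps = self.alpha * inst + (1 - self.alpha) * self.fps
        return self.fps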
Example #41
0
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = './models/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)


    writeVideo_flag = True

    video_capture = cv2.VideoCapture(args.input_video)

    if writeVideo_flag:
    # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('/media/rudy/C0E28D29E28D252E/2017_taroko/终点前/video_YDXJ0101.avi', fourcc, 15, (w, h))
        list_file = open('/media/rudy/C0E28D29E28D252E/2017_taroko/终点前/detection_YDXJ0101.txt', 'w')
        frame_index = -1 
        
    fps = 0.0
    n = 0

    id_frame = 0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break

        if int(args.skip_frame) != n:
            n += 1
            continue
        n = 0

        t1 = time.time()

        image = Image.fromarray(frame)
        boxs, classes = yolo.detect_image(image)

        for idb, box in enumerate(boxs):
            cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[0])+int(box[2]), int(box[1])+int(box[3])),(255,255,255), 2)
            cv2.putText(frame, str(classes[idb]),(int(box[0]), int(box[1])),0, 5e-3 * 100, (0,255,0),2)

        features = encoder(frame, boxs)

        # Detection confidence is fixed to 1.0 here.
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        for track in tracker.tracks:
            # was "is_confirmed() and ...", which skipped exactly the live,
            # confirmed tracks; every other example uses the form below
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            # cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            # cv2.putText(frame, str(track.track_id), (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200, (0, 255, 0), 2)

        for idb, det in enumerate(detections):
            bbox = det.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            # cv2.putText(frame, str(classes[idb]), (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 100, (0, 255, 0), 2)


        # cv2.imshow('gallery', frame)
        
        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index)+' ')
            if len(boxs) != 0:
                for i in range(0,len(boxs)):
                    list_file.write(str(boxs[i][0]) + ' '+str(boxs[i][1]) + ' '+str(boxs[i][2]) + ' '+str(boxs[i][3]) + ';')
            list_file.write('\n')
            
        fps  = ( fps + (1./(time.time()-t1)) ) / 2

        id_frame +=1
        print("idx_frame-%d, fps= %f"%(id_frame, fps))
        
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
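The skip_frame counter above processes only every (skip + 1)-th frame. The same pattern as a generator, sketched under the assumption that cap is any cv2.VideoCapture-like object:

def skipped_frames(cap, skip):
    # Yield one frame, then silently drop the next `skip` frames.
    n = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if n != skip:
            n += 1
            continue
        n = 0
        yield frame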
Example #42
0
def main():
    counter = []
    writeVideo_flag = False
    display = False
    fps = 0.0
    info_cam = load_list_video(list_video_path, id_path)

    max_cosine_distance = 0.8
    nn_budget = 100
    nms_max_overlap = 1.0
    for info in info_cam:
        path = os.path.join(video_path, info[1])
        ROI, MOI = load_roi_moi(zones_path, mois_path, info[1])
        frame_delay = load_delay('../Dataset_A/time_delay.txt', info[1])
        print('delay', frame_delay)
        print('MOI', MOI)
        car_class = OT(1, 'Car')
        truck_class = OT(2, 'Truck')
        objects = [car_class, truck_class]
        print("Processing video: ", info)
        video_capture = cv2.VideoCapture(path)
        if writeVideo_flag:
            # Define the codec and create VideoWriter object
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            out = cv2.VideoWriter('result_' + info[1] + '.avi', fourcc,
                                  info[-1], (w, h))

        pause_display = False
        frame_num = 17900
        data = []
        start_video = time.time()
        while True:

            start = time.time()
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
            ret, frame = video_capture.read()  # frame shape 640*480*3

            if not ret:
                break
            #   print(count1)
            #print(frame.shape)
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
            result = []
            t1 = time.time()

            img = letterbox(frame, new_shape=img_size)[0]
            img = img[:, :, ::-1].transpose(2, 0,
                                            1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            res = frame.copy()
            dets = run_detect(model, img, device, frame)

            for det, ob in zip(dets, objects):
                ob.predict_obtracker(frame, det)
                ob.update_obtracker()
                #frame = ob.visualize(frame)
                res, data = ob.tracking_ob1(ROI, MOI, frame, info[0],
                                            frame_num, data, frame_delay)
            print('saved', len(data))

            #frame_num += 1
            if display:
                draw_roi(ROI, res)
                cv2.imshow('frame', res)
                if not pause_display:
                    key = cv2.waitKey(10)
                    if key == ord('q'):
                        break
                    if key == ord(' '):
                        pause_display = not pause_display
                    frame_num += 1
                else:
                    key = cv2.waitKey(10)
                    if key == ord('q'):
                        break
                    if key == ord(' '):
                        pause_display = not pause_display
            if writeVideo_flag:
                #save a frame
                out.write(frame)
            print('frame_num: {} fps: {} '.format(frame_num,
                                                  (1 / (time.time() - start))))
        print('Process {}: {:.2f} min'.format(info[0],
                                              (time.time() - start_video) / 60))
        write_result_file(data)
        print('wrote')
    print(" ")

    print("[Finish]")

    video_capture.release()

    if writeVideo_flag:
        out.release()
        #list_file.close()
    result_file.close()
    cv2.destroyAllWindows()
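The letterbox block above also converts OpenCV's BGR HxWxC frame into the RGB CxHxW layout the detector expects. A stand-alone sketch of just that conversion, on a dummy frame:

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in BGR frame
img = frame[:, :, ::-1]         # BGR -> RGB (reverse the channel axis)
img = img.transpose(2, 0, 1)    # HWC -> CHW, now 3x480x640
img = np.ascontiguousarray(img) # make memory contiguous for the framework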
Example #43
0
File: main.py Project: 582751489/PDTR
def main(yolo):
    t0 = time.time()
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    counter = []
    #deep_sort
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)  # used for tracking

    find_objects = ['person']
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True
    video_capture = cv2.VideoCapture(args["input"])

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        if args["ids"] == False:
            out = cv2.VideoWriter('./output/output%s.avi' % args["camera"][1],
                                  fourcc, 50, (w, h))
        else:
            out = cv2.VideoWriter(
                './output/output%s_reid.avi' % args["camera"][1], fourcc, 50,
                (w, h))
        list_file = open('detection_rslt.txt', 'w')
        frame_index = -1
        nump = 1
    #fps = 0.0

    while True:

        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break
        t1 = time.time()
        frame2 = copy.deepcopy(frame)
        #image = Image.fromarray(frame)
        image = Image.fromarray(frame[..., ::-1])  # BGR to RGB; only YOLO needs RGB
        boxs, confidence, class_names = yolo.detect_image(image)
        print(boxs)
        features = encoder(frame, boxs)
        # Detection confidence is fixed to 1.0 here.
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        i = int(0)
        indexIDs = []
        c = []
        boxes = []
        makequery = True
        for det in detections:
            bbox = det.to_tlbr()

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            indexIDs.append(int(track.track_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255),
                          2)  # tracking box
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]
            list_file.write(str(frame_index) + ',')
            list_file.write(str(track.track_id) + ',')  # ids of everyone in the frame
            b0 = str(bbox[0])
            b1 = str(bbox[1])
            b2 = str(bbox[2] - bbox[0])
            b3 = str(bbox[3] - bbox[1])

            # write the box as x, y, w, h
            list_file.write(b0 + ',' + b1 + ',' + b2 + ',' + b3)
            list_file.write('\n')
            if len(class_names) > 0:
                class_name = class_names[0]
                cv2.putText(frame, str(class_names[0]),
                            (int(bbox[0]), int(bbox[1] - 20)), 0, 5e-3 * 150,
                            (255, 255, 255), 2)  #person

            i += 1
            center = (int(
                ((bbox[0]) + (bbox[2])) / 2), int(((bbox[1]) + (bbox[3])) / 2))

            pts[track.track_id].append(center)

            thickness1 = 5
            for j in range(1, len(pts[track.track_id])):
                if pts[track.track_id][j - 1] is None or pts[
                        track.track_id][j] is None:
                    continue
                thickness = int(np.sqrt(64 / float(j + 1)) * 2)
                cv2.line(frame, (pts[track.track_id][j - 1]),
                         (pts[track.track_id][j]), (255, 255, 255), thickness)
            if args["ids"] == False:
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (color), 3)
                cv2.putText(frame, str(track.track_id),
                            (int(bbox[0]), int(bbox[1] - 50)), 0, 5e-3 * 150,
                            (color), 2)  #id
                cv2.circle(frame, (center), 1, color, thickness1)
                for j in range(1, len(pts[track.track_id])):
                    if pts[track.track_id][j - 1] is None or pts[
                            track.track_id][j] is None:
                        continue
                    thickness = int(np.sqrt(64 / float(j + 1)) * 2)
                    cv2.line(frame, (pts[track.track_id][j - 1]),
                             (pts[track.track_id][j]), (color), thickness)
                try:
                    num = (int(args["camera"][1]) - 1) * 200
                    path = 'Z:\\pro2\\whole\\person\\gallery\\%04d' % int(
                        track.track_id + num)
                    if not os.path.exists(path):
                        os.makedirs(path)
                    if len(os.listdir(path)) <= 150:  # store at most 150 crops per id
                        crop = frame2[int(bbox[1]):int(bbox[3]),
                                      int(bbox[0]):int(bbox[2])]
                        crop = cv2.resize(crop, (64, 128),
                                          interpolation=cv2.INTER_AREA
                                          )  # INTER_CUBIC to upscale, INTER_AREA to downscale
                        filepath = path + '\\' + '%04d' % int(
                            track.track_id +
                            num) + '_%s_' % args["camera"] + '%04d' % int(
                                len(os.listdir(path)) +
                                1) + '_%.2f' % (video_capture.get(0) /
                                                1000) + '.jpg'  #%04d
                        cv2.imwrite(filepath, crop)
                except Exception:
                    continue

            # show only the single queried id
            else:
                makequery = False
                id1 = int(args["ids"])
                if int(track.track_id) == id1:
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (color), 3)
                    cv2.putText(frame, str(track.track_id),
                                (int(bbox[0]), int(bbox[1] - 50)), 0,
                                5e-3 * 150, (color), 2)  #id
                    cv2.circle(frame, (center), 1, color, thickness1)
                    for j in range(1, len(pts[track.track_id])):
                        if pts[track.track_id][j - 1] is None or pts[
                                track.track_id][j] is None:
                            continue
                        thickness = int(np.sqrt(64 / float(j + 1)) * 2)
                        cv2.line(frame, (pts[track.track_id][j - 1]),
                                 (pts[track.track_id][j]), (color), thickness)
                    cv2.putText(frame, str(class_names[0]),
                                (int(bbox[0]), int(bbox[1] - 20)), 0,
                                5e-3 * 150, (color), 2)  #person
                else:
                    continue
        count = len(set(counter))
        cv2.putText(frame, "Total Pedestrian Counter: " + str(count),
                    (int(20), int(80)), 0, 5e-3 * 200, (0, 255, 0), 2)
        cv2.putText(frame, "Current Pedestrian Counter: " + str(i),
                    (int(20), int(40)), 0, 5e-3 * 200, (0, 255, 0), 2)
        cv2.namedWindow("YOLO4_Deep_SORT", 0)
        cv2.resizeWindow('YOLO4_Deep_SORT', 1024, 768)
        cv2.imshow('YOLO4_Deep_SORT', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # build the query set from the stored gallery crops
    if makequery:
        root_path = 'Z:\\pro2\\whole\\person\\gallery\\'
        copy_path = 'Z:\\pro2\\whole\\person\\query\\'
        ids = os.listdir(root_path)
        #print(ids)
        for i in ids:
            img_path = root_path + i
            img = os.listdir(img_path)
            indeximg = img[int(len(img) / 2)]
            old_name = img_path + '\\' + indeximg
            new_path = copy_path + i
            new_name = new_path + '\\' + indeximg
            if not os.path.exists(new_path):
                os.makedirs(new_path)
            shutil.copyfile(old_name, new_name)
    print(" ")
    print("[Finish]")
    end = time.time()
    print("the whole time ", end - t0)
    if len(pts[track.track_id]) != 0:  # was "!= None", which is always true
        print(args["input"][43:57] + ": " + str(count) + " " +
              str(class_name) + ' Found')

    else:
        print("[No Found]")
    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
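The fading motion trail in this example indexes a per-id history of box centers. A minimal sketch of that drawing logic, assuming pts is the usual defaultdict of bounded deques (an assumption; the original defines pts elsewhere):

from collections import defaultdict, deque
import numpy as np
import cv2

pts = defaultdict(lambda: deque(maxlen=30))  # recent centers per track id

def draw_trail(frame, track_id, color):
    trail = pts[track_id]
    for j in range(1, len(trail)):
        if trail[j - 1] is None or trail[j] is None:
            continue
        # same decay formula as above: thickness shrinks as j grows
        thickness = int(np.sqrt(64.0 / (j + 1)) * 2)
        cv2.line(frame, trail[j - 1], trail[j], color, thickness)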
Example #44
0
def main():
    global frame, frame_index, out, list_file, track, count

    start = time.time()

    # parameter definitions
    max_cosine_distance = 0.5  # cosine-distance gating threshold (0.9 originally)
    nn_budget = None
    nms_max_overlap = 0.3  # non-maximum suppression threshold
    # whether to save the recognition results
    write_video_flag = True

    counter = []

    # load our serialized model from disk
    # print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # deep_sort
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    video_capture = cv2.VideoCapture(args["input"])
    obj_count_txt_filename = 'counter.txt'
    count_file = open(obj_count_txt_filename, 'a')
    count_file.write('\n')

    if write_video_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        # DIVX, XVID, MJPG, X264, WMV1, WMV2 (XVID is preferable; MJPG yields large files, X264 very small ones)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(
            os.path.join('video',
                         str(args['input'].split('.')[0][-7:]) + '_out.avi'),
            fourcc, 20, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1
    # frame-rate bookkeeping
    fps = 0.0

    while True:

        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        time1 = time.time()

        # frame = imutils.resize(frame, width=800)
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)
        # run the detector
        time2 = time.time()

        net.setInput(blob)
        detections = net.forward()

        # detections.shape
        # >>> (1, 1, n, 7)
        # eg:(1, 1, 2, 7)
        # [[[[0.          9.          0.42181703  0.4647404   0.610577
        #     0.6360997   0.8479532]
        #    [0.         15.          0.8989926   0.21603307  0.42735672
        #    0.58441484  0.8699994]]]]
        boxs = []
        class_names = []
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            # greater than the minimum confidence
            if confidence > args["confidence"]:
                idx = int(detections[0, 0, i, 1])
                class_name = CLASSES[idx]

                # keep only the classes of interest
                if class_name in NEED_CLASSES:
                    class_names.append(class_name)
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    # convert to integer coordinates
                    (startX, startY, endX, endY) = box.astype("int")
                    startX = 0 if startX < 0 else startX
                    startY = 0 if startY < 0 else startY

                    boxs.append([startX, startY, endX - startX, endY - startY])

        print(boxs, class_names)
        time3 = time.time()
        print('detect cost is', time3 - time2)

        # feature extraction
        features = encoder(frame, boxs)
        # Detection confidence is fixed to 1.0 here.
        detections = [
            Detection(bbox, class_name, 1.0, feature)
            for bbox, class_name, feature in zip(boxs, class_names, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        time4 = time.time()
        print('features extract is', time4 - time3)

        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        time5 = time.time()
        print('update tracker cost:', time5 - time4)

        i = 0
        # tracker ids
        indexIDs = []

        for track in tracker.tracks:

            # TODO: verify the and/or in this condition
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            # boxes.append([track[0], track[1], track[2], track[3]])
            indexIDs.append(track.track_id)
            counter.append(track.track_id)
            bbox = track.to_tlbr()
            start_x, start_y, end_x, end_y = bbox.astype('int')
            color = COLORS[indexIDs[i] % len(COLORS)].tolist()

            if not track.flag and track.class_name == 'person':
                track.flag = handle_face_car('person', start_x, start_y, end_x,
                                             end_y)
            else:
                track.flag = handle_face_car(track.class_name, start_x,
                                             start_y, end_x, end_y,
                                             not track.flag)
            # draw the tracking box and id label
            cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), color, 3)
            cv2.putText(frame, track.class_name + str(track.track_id),
                        (int(bbox[0]), int(bbox[1] - 40)), 0, 0.75, color, 2)

            i += 1
            # draw the motion path
            center = int(((bbox[0]) + (bbox[2])) / 2), int(
                ((bbox[1]) + (bbox[3])) / 2)
            pts[track.track_id].append(center)
            thickness = 5
            cv2.circle(frame, center, 1, color, thickness)

            for j in range(1, len(pts[track.track_id])):
                if pts[track.track_id][j - 1] is None or pts[
                        track.track_id][j] is None:
                    continue
                thickness = int(np.sqrt(64 / (j + 1.0)) * 2)
                cv2.line(frame, (pts[track.track_id][j - 1]),
                         (pts[track.track_id][j]), color, thickness)

        time6 = time.time()
        print('handle tracker cost:', time6 - time5)

        # draw white detection boxes (disabled)
        # for det in detections:
        #     bbox = det.to_tlbr()
        #     cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)

        count = len(set(counter))
        cv2.putText(frame, "Total Object Counter: " + str(count), (20, 120), 0,
                    0.75, (0, 255, 0), 2)
        cv2.putText(frame, "Current Object Counter: " + str(i), (20, 80), 0,
                    0.75, (0, 255, 0), 2)
        cv2.putText(frame, "FPS: %f" % fps, (20, 40), 0, 1.0, (0, 255, 0), 2)
        # time7 = time.time()
        # print('Draw Rectangle and Text cost:', time7 - time6)

        cv2.namedWindow("SSD_Deep_SORT", 0)
        cv2.resizeWindow('SSD_Deep_SORT', 1024, 768)
        cv2.imshow('SSD_Deep_SORT', frame)

        if write_video_flag:
            # save a frame
            out.write(frame)
            frame_index += 1
            list_file.write(str(frame_index) + ' ')
            if len(boxs) != 0:
                for i in range(0, len(boxs)):
                    list_file.write(
                        str(boxs[i][0]) + ' ' + str(boxs[i][1]) + ' ' +
                        str(boxs[i][2]) + ' ' + str(boxs[i][3]) + ' ')
            list_file.write('\n')
        fps = (fps + (1. / (time.time() - time1))) / 2
        # print(set(counter))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print("[Finish]")
    end = time.time()

    # if len(pts[track.track_id]):
    #     print(str(args["input"]) + ": " + str(count) + 'target Found')
    #     count_file.write(str("[VIDEO]: " + args["input"]) + " " + (
    #         str(count)) + " " + "[MODEL]: MobileNetSSD" + " " + "[TIME]:" + (str('%.2f' % (end - start))))
    # else:
    #     print("[No Found]")

    video_capture.release()
    count_file.write('\n')
    count_file.close()
    if write_video_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()
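The (1, 1, N, 7) output parsed above packs one row per candidate: [image_id, class_id, confidence, x1, y1, x2, y2] with normalized corners. A compact sketch of the same parsing as a reusable function (names are illustrative):

import numpy as np

def parse_ssd(detections, w, h, min_conf=0.5):
    boxes = []
    for i in range(detections.shape[2]):
        conf = float(detections[0, 0, i, 2])
        if conf < min_conf:
            continue
        cls = int(detections[0, 0, i, 1])
        # scale normalized corners to pixel coordinates
        x1, y1, x2, y2 = (detections[0, 0, i, 3:7] * np.array([w, h, w, h])).astype(int)
        boxes.append((cls, conf, max(x1, 0), max(y1, 0), x2, y2))
    return boxes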
Example #45
0
#! /usr/bin/env python

## Program to find the prime numbers from 1 to n.

import math
from timeit import time

primeno = []

def prime(n):
    for i in xrange(2, n + 1):
        sq = math.sqrt(i)
        count = 0
        for j in primeno:
            if j > sq + 1:
                break
            if i % j == 0:
                count += 1
                break
        if count == 0:
            primeno.append(i)

start_time = time.time()
prime(10**5)  # You can specify any number
end_time = time.time()
total_time = end_time - start_time # Time in seconds
#print primeno # uncomment this line if you want to see the prime numbers.
print 'Total time:', total_time
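For comparison, a Sieve of Eratosthenes finds the same primes in O(n log log n) instead of trial division; a Python 3 sketch with the same timing harness (a swapped-in technique, not the code above):

import time

def sieve(n):
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            # strike out every multiple of i starting at i*i
            is_prime[i * i::i] = [False] * len(is_prime[i * i::i])
    return [i for i, p in enumerate(is_prime) if p]

start = time.time()
primes = sieve(10 ** 5)
print('Total time:', time.time() - start)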
Example #46
0
def main():
    salir = False
    while not salir:
        print("Select what you want to run: \n")
        print("1: Bezout.")
        print("2: Modular inverse. ")
        print("3: Modular exponentiation. ")
        print("4: Miller-Rabin test. ")
        print("5: Baby-step giant-step. ")
        print("6: Chinese remainder theorem. ")
        print("7: Jacobi. ")
        print("8: Modular roots. ")
        print("9: Pollard. ")
        print("10: Fermat. ")
        print("11: Lift roots. ")
        print("12: quit")
        seleccion = input()
        if seleccion == "1":
            print("##################BEZOUT################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            print("Enter the value of b: \n")
            b = int(input())
            start_time = time.time()
            salida = Bezout(a,b)
            elapsed_time = time.time() - start_time
            print("The Bezout values are:","\nValue of u: ",salida[0],"\nValue of v: ",salida[1],"\nValue of gcd: ",salida[2])
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "2":
            print("##################MODULAR INVERSE################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            print("Enter the value of b: \n")
            b = int(input())
            start_time = time.time()
            salida1 = inverso_modular(a,b)
            elapsed_time = time.time() - start_time
            print("The modular inverse is: " , salida1)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "3":
            print("##################MODULAR EXPONENTIATION################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            print("Enter the value of n: \n")
            n = int(input())
            print("Enter the value of p: \n")
            p = int(input())
            start_time = time.time()
            salida2 = potencia_mod(a,n,p)
            elapsed_time = time.time() - start_time
            print("Modular power: ",salida2)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "4":
            print("##################MILLER-RABIN TEST################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            start_time = time.time()
            salida3 = test_Miller_Rabin(a)
            elapsed_time = time.time() - start_time
            print("Miller-Rabin test result: ",salida3)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "5":
            print("##################BABY-STEP GIANT-STEP################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            print("Enter the value of n: \n")
            n = int(input())
            print("Enter the value of p: \n")
            p = int(input())
            start_time = time.time()
            salida4 = Paso_enano_gigante(a,n,p)  # was (a,b,p); b is never read here
            elapsed_time = time.time() - start_time
            print("The baby-step giant-step solution is: ",salida4)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "6":
            print("##################CHINESE REMAINDER THEOREM################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            print("Enter the value of n: \n")
            n = int(input())
            print("Enter the value of p: \n")
            p = int(input())
            print("Enter the value of q: \n")
            q = int(input())
            start_time = time.time()
            salida5 = T_chino_restos(a,n,p,q)  # was (a,b,p,q); b is never read here
            elapsed_time = time.time() - start_time
            print("The Chinese remainder theorem output is: ",salida5)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "7":
            print("##################JACOBI################## \n")
            print("Enter the value of p: \n")
            p = int(input())
            print("Enter the value of m: \n")
            m = int(input())
            start_time = time.time()
            salida6 = Jacobi(p,m)
            elapsed_time = time.time() - start_time
            print("The Jacobi output is: ", salida6)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "8":
            print("##################MODULAR ROOTS################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            print("Enter the value of p: \n")
            p = int(input())
            start_time = time.time()
            salida7 = raices_modulares(a,p)
            elapsed_time = time.time() - start_time
            print("The modular root is: ",salida7)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "9":
            print("##################POLLARD################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            start_time = time.time()
            salida8 = Pollard(a)
            elapsed_time = time.time() - start_time
            print("Pollard is: ",salida8)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "10":
            print("##################FERMAT################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            start_time = time.time()
            salida9 = Fermat(a)  # was Fermat(91), which ignored the input
            elapsed_time = time.time() - start_time
            print("Fermat is:",salida9)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "11":
            print("##################LIFT ROOTS################## \n")
            print("Enter the value of a: \n")
            a = int(input())
            print("Enter the value of p: \n")
            p = int(input())
            print("Enter the value of q: \n")
            q = int(input())
            start_time = time.time()
            salida10 = levantar_raices(a,p,q)
            elapsed_time = time.time() - start_time
            print("lift roots: ",salida10)
            print("Elapsed time: %0.10f seconds." % elapsed_time)
        elif seleccion == "12":
            salir = True
        else:
            print("Invalid selection \n")
Example #47
0
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0

    output_format = 'mp4'
    video_name = 'bus4_2in_4out.mp4'
    file_path = join('data_files/videos', video_name)
    output_name = 'save_data/out_' + video_name[0:-3] + output_format
    initialize_door_by_yourself = False
    door_array = None
    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True
    writeVideo_flag = True
    asyncVideo_flag = False

    counter = Counter(counter_in=0, counter_out=0, track_id=0)

    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(output_name, fourcc, 15, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    ret, first_frame = video_capture.read()

    if door_array is None:
        if initialize_door_by_yourself:
            door_array = select_object(first_frame)[0]
            print(door_array)
        else:
            all_doors = read_door_info('data_files/doors_info.csv')
            door_array = all_doors[video_name]

    border_door = door_array[3]
    error_values = []
    truth = get_truth(video_name)
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            total_count = counter.return_total_count()
            true_total = truth.inside + truth.outside
            err = abs(total_count - true_total) / true_total
            log_res = "in video: {}\n predicted / true\n counter in: {} / {}\n counter out: {} / {}\n" \
                      " total: {} / {}\n error: {}\n______________\n".format(video_name, counter.counter_in,
                                                                             truth.inside,
                                                                             counter.counter_out, truth.outside,
                                                                             total_count, true_total, err)
            with open('log_results.txt', 'w') as file:
                file.write(log_res)
            print(log_res)
            error_values.append(err)
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        features = encoder(frame, boxes)
        detections = [
            Detection(bbox, confidence, cls,
                      feature) for bbox, confidence, cls, feature in zip(
                          boxes, confidence, classes, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.cls for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        cv2.rectangle(frame, (int(door_array[0]), int(door_array[1])),
                      (int(door_array[2]), int(door_array[3])), (23, 158, 21),
                      2)

        for det in detections:
            bbox = det.to_tlbr()
            if show_detections and len(classes) > 0:
                score = "%.2f" % (det.confidence * 100) + "%"
                rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                rect_door = Rectangle(int(door_array[0]), int(door_array[1]),
                                      int(door_array[2]), int(door_array[3]))
                intersection = rect_head & rect_door

                if intersection:
                    squares_coeff = rect_square(*intersection) / rect_square(
                        *rect_head)
                    cv2.putText(
                        frame,
                        score + " inter: " + str(round(squares_coeff, 3)),
                        (int(bbox[0]), int(bbox[3])), 0, 1e-3 * frame.shape[0],
                        (0, 100, 255), 5)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            # first appearence of object with id=track.id

            if track.track_id not in counter.people_init or counter.people_init[
                    track.track_id] == 0:
                counter.obj_initialized(track.track_id)
                rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                rect_door = Rectangle(door_array[0], door_array[1],
                                      door_array[2], door_array[3])
                res = rect_head & rect_door
                if res:

                    inter_square = rect_square(*res)
                    head_square = rect_square(*rect_head)
                    #     was initialized in door, probably going in
                    if (inter_square / head_square) >= 0.8:
                        counter.people_init[track.track_id] = 2
                        #     initialized in the bus, mb going out
                    elif (inter_square /
                          head_square) <= 0.4 or bbox[3] > border_door:
                        counter.people_init[track.track_id] = 1
                # res is None, means that object is not in door contour
                else:
                    counter.people_init[track.track_id] = 1

                counter.people_bbox[track.track_id] = bbox
            counter.cur_bbox[track.track_id] = bbox

            adc = "%.2f" % (track.adc *
                            100) + "%"  # Average detection confidence
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, "ID: " + str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 1e-3 * frame.shape[0],
                        (0, 255, 0), 5)

            if not show_detections:
                track_cls = track.cls
                cv2.putText(frame, str(track_cls),
                            (int(bbox[0]), int(bbox[3])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)
                cv2.putText(
                    frame, 'ADC: ' + adc,
                    (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])), 0,
                    1e-3 * frame.shape[0], (0, 255, 0), 1)

        id_get_lost = [
            track.track_id for track in tracker.tracks
            if track.time_since_update >= 25 and track.age >= 29
        ]
        id_inside_tracked = [
            track.track_id for track in tracker.tracks if track.age > 60
        ]
        for val in counter.people_init.keys():
            # check bbox also
            cur_c = find_centroid(counter.cur_bbox[val])
            init_c = find_centroid(counter.people_bbox[val])
            vector_person = (cur_c[0] - init_c[0], cur_c[1] - init_c[1])

            if val in id_get_lost and counter.people_init[val] != -1:
                # if vector_person < 0 then current coord is less than initialized, it means that man is going
                # in the exit direction
                if vector_person[1] > 70 and counter.people_init[
                        val] == 2:  # and counter.people_bbox[val][3] > border_door \
                    counter.get_in()

                elif vector_person[1] < -70 and counter.people_init[val] == 1:
                    counter.get_out()

                counter.people_init[val] = -1
                print(f"person left frame")
                print(f"current centroid - init : {cur_c} - {init_c}\n")
                print(f"vector: {vector_person}\n")

                del val
            # elif val in id_inside_tracked and val not in id_get_lost and counter.people_init[val] == 1 \
            #         and bb_intersection_over_union(counter.cur_bbox[val], door_array) <= 0.3 \
            #         and vector_person[1] > 0:  # and \
            #     # counter.people_bbox[val][3] > border_door:
            #     counter.get_in()
            #
            #     counter.people_init[val] = -1
            #     print(f"person is tracked for a long time")
            #     print(f"current centroid - init : {cur_c} - {init_c}\n")
            #     print(f"vector: {vector_person}\n")
            #     imaggg = cv2.line(frame, find_centroid(counter.cur_bbox[val]),
            #                       find_centroid(counter.people_bbox[val]),
            #                       (0, 0, 255), 7)

            # cv2.imshow('frame', imaggg)
            # cv2.waitKey(0)

        ins, outs = counter.show_counter()
        cv2.putText(frame, "in: {}, out: {} ".format(ins, outs), (10, 30), 0,
                    1e-3 * frame.shape[0], (255, 0, 0), 5)

        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('image', 1400, 800)
        cv2.imshow('image', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            # print("FPS = %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()

    mean_error = np.mean(error_values)
    print("mean error for {} video: {}".format(video_name, mean_error))
Example #48
0
File: dEvol.py Project: rahlk/Bellwether
  res = DE.DE()
#  print(model.depen(res))
  return res


def tuner(model, data):
  if model == rforest:
    return _de(tuneRF, data)
  elif model == CART:
    return _de(tuneCART, data)

if __name__ == '__main__':
  from timeit import time
  data = explore(dir='../Data/')[0][5]  # Only training data to tune.
  print(data)
#   set_trace()
  for m in [tuneRF]:
    t = time.time()
    mdl = m(data)
#   _test(data)
    tunings = _de(m, data)
    print(tunings)
    print(mdl.depen(tunings))
    print(time.time() - t)
#   print _de()
#  print main()
#  import sk; xtile = sk.xtile
#  print xtile(G)

 # main(dir = 'Data/')
Example #49
0
def main():
    start = time.time()
    counter = []
    writeVideo_flag = False
    fps = 0.0
    filename_path = os.path.join(result_path, 'submission.txt')
    list_video, list_ids = load_list_video(list_video_path, id_path)
    result_file = open(filename_path, 'w')

    max_cosine_distance = 0.8
    nn_budget = 100
    nms_max_overlap = 1.0
    display = True
    for video in list_video: 
        path = os.path.join(video_path, video)
        ROI = load_roi(zones_path, video)
        vis = visualization.Visualization(img_shape=(960, 1280, 3), update_ms=2000)

        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        tracker = Tracker(metric)
        results = []
        print("Processing video: ", video )
        video_capture = cv2.VideoCapture(path)

        pause_display = False
        frame_num = 0
        while True:
            
            start = time.time()
            # print(count)
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
            ret, frame = video_capture.read()  # frame shape 640*480*3
            # gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
            # frame = np.zeros_like(frame1)
            # frame[:,:,0] = gray
            # frame[:,:,1] = gray
            # frame[:,:,2] = gray
            
            if not ret:
                break
             #   print(count1)
            #print(frame.shape)
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))   
            result = []
            t1 = time.time()
            
            img = letterbox(frame, new_shape=img_size)[0]
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            dets = run_detect(model,img,device,frame)

            detectionss=[]
            for det in dets:
                feature = gdet.HOG_feature(frame, det[:4])
                detectionss.append(Detection(det[:4], det[4], feature, det[-1]))   
            #detectionss.append(Detection(det[:4], det[4], det[-1]) for det in dets)
            img = np.zeros((h, w, 3), np.uint8)
            img = frame.copy()
            min_confidence = 0.4
            detections = [d for d in detectionss if d.confidence >= min_confidence]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Update tracker.
            tracker.predict()
            tracker.update(detections)

            if display:
                vis.set_image(frame.copy())
                vis.draw_detections(detections)
                vis.draw_trackers(tracker.tracks)
            res = vis.return_img()
            draw_roi(ROI, res)
            cv2.imshow('frame', res)
            print('frame_num', frame_num)
            paused = pause_display
            key = cv2.waitKey(0 if paused else 2)
            if key == ord('q'):
                break
            if key == ord(' '):
                pause_display = not pause_display
            if not paused:
                frame_num += 1
            
        print(" ")
        print("[Finish]")
        

    video_capture.release()

    if writeVideo_flag:
        # 'out' would be the cv2.VideoWriter opened when writing is enabled;
        # it is never created in this example, so the flag stays False.
        out.release()
    result_file.close()
    cv2.destroyAllWindows()
Example #50
0
    def update(self, frame, recent_pupil_positions, events):
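        """Gaze/blink control: two quick blinks toggle the control mode, and
        while the mode is ON the gaze position drives the motion state machine."""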
        
        for pt in recent_pupil_positions:
            # if the eye is detected
            if pt['norm_gaze'] is not None:
                self.nb_frame += 1
                # if the eye was closed, record the moment it (re)opened
                if not self.eye_open:
                    self.time_eye_o = time.time()
                self.eye_open = True
                
                # BLINK CONTROL
                # if the eye has been open too long, previous blinks are forgotten
                if (time.time() - self.time_eye_o) > self.FORGET_BLINK:
                    self.nb_blink = 0
                # if the last closed spell lasted long enough, count a blink
                if (time.time() - self.time_eye_c) > self.BLINK:
                    self.nb_blink += 1
                    print "Nb blink: %d" % self.nb_blink
                # two blinks toggle the control mode (OFF -> ON, ON -> OFF)
                if self.nb_blink >= 2:
                    if self.control_mode.value == self.OFF:
                        self.control_mode.value = self.ON
                    else:
                        self.control_mode.value = self.OFF
                    self.nb_blink = 0
                    print "Control mode: %s" % self.control_mode.value
                
                # refreshed every open frame, so the blink check above only
                # measures the duration of the last closed spell
                self.time_eye_c = time.time()
                    
                # STATE CONTROL: once control is ON and enough frames have
                # passed since the last change, gaze position drives the
                # motion state machine.
                if self.nb_frame > self.STATE_WAIT and self.control_mode.value == self.ON:
                    # self.LEFT
                    if (pt['norm_gaze'][0] < self.LEFT_BORDER) and (self.state.value != self.BACKWARD):
                        self.previous_state = self.state.value
                        self.state.value = self.LEFT
                        self.nb_frame = 0
                    elif (self.state.value == self.LEFT) and (pt['norm_gaze'][0] > self.LEFT_BORDER):
                        self.state.value = self.STOP if self.previous_state == self.STOP else self.NORMAL_SPEED
                        self.nb_frame = 0
                    # self.RIGHT
                    elif (pt['norm_gaze'][0] > self.RIGHT_BORDER) and (self.state.value != self.BACKWARD):
                        self.previous_state = self.state.value
                        self.state.value = self.RIGHT
                        self.nb_frame = 0
                    elif (self.state.value == self.RIGHT) and (pt['norm_gaze'][0] < self.RIGHT_BORDER):
                        self.state.value = self.STOP if self.previous_state == self.STOP else self.NORMAL_SPEED
                        self.nb_frame = 0
                    # NORMAL SPEED
                    elif (pt['norm_gaze'][1] > self.UP_BORDER) and (self.state.value == self.STOP):
                        self.state.value = self.NORMAL_SPEED
                        self.nb_frame = 0
                    elif (pt['norm_gaze'][1] < self.DOWN_BORDER) and (self.state.value == self.HIGH_SPEED):
                        self.state.value = self.NORMAL_SPEED
                        self.nb_frame = 0

                    # HIGH SPEED
                    elif (pt['norm_gaze'][1] > self.UP_BORDER) and (self.state.value == self.NORMAL_SPEED):
                        self.state.value = self.HIGH_SPEED
                        self.nb_frame = 0
                    # self.BACKWARD
                    elif (pt['norm_gaze'][1] < self.DOWN_BORDER) and (self.state.value == self.STOP):
                        self.state.value = self.BACKWARD
                        self.nb_frame = 0

                    # self.STOP
                    elif (pt['norm_gaze'][1] > self.UP_BORDER) and (self.state.value == self.BACKWARD):
                        self.state.value = self.STOP
                        self.nb_frame = 0
                    elif (pt['norm_gaze'][1] < self.DOWN_BORDER) and (self.state.value == self.NORMAL_SPEED):
                        self.state.value = self.STOP
                        self.nb_frame = 0
                    
                #END if nb_frame > STATE_WAIT    
                
                self.gaze_x.value = pt['norm_gaze'][0]
                self.gaze_y.value = pt['norm_gaze'][1]

                self.pupil_display_list.append(pt['norm_gaze'])
            # if the eye is not detected
            else:
                # if the eye was open, record the moment it closed
                if self.eye_open:
                    self.time_eye_c = time.time()

                self.eye_open = False
                self.time_eye_o = time.time()
                # if the eye stays closed too long, we stop
                if (time.time() - self.time_eye_c) > self.BLINK:
                    self.state.value = self.STOP
                    print "Volunteer blink"
                
        # keep only the three most recent gaze points for display
        self.pupil_display_list[:-3] = []


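        # Window open/close requests are latched into flags elsewhere and
        # serviced once per update pass here.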
        if self.window_should_close:
            self.close_window()

        if self.window_should_open:
            self.open_window()