Example 1
def filter_per_activities_to_keep(message):
    if check_registration(message):
        def _filter(msg):
            if msg.text == DONE_MENU:
                if len(activities_to_keep) > 0:
                    if start_processing(message, no_positive_message=True): return
                    bot.send_chat_action(message.chat.id, STATUS_TYPING)
                    bot.send_message(chat_id, random.choice(OK_MESSAGES), reply_markup=types.ReplyKeyboardRemove(selective=False))
                    pm.filter_per_activities_to_keep(replied_message.chat.id, activities_to_keep)
                    bot.send_chat_action(message.chat.id, STATUS_TYPING)
                    bot.send_message(message.chat.id, "I applied the filter")
                    end_processing(message)
                else:
                    bot.send_chat_action(message.chat.id, STATUS_TYPING)
                    bot.reply_to(msg, "Ooops, no activities selected")
                    bot.register_next_step_handler(msg, _filter)
            else:
                activities_to_keep.append(msg.text)
                bot.register_next_step_handler(msg, _filter)

        if start_processing(message, no_positive_message=True): return
        activities_to_keep = []
        chat_id = message.chat.id
        tracker.track(chat_id, "keepactivities")
        activities = pm.get_all_activities(chat_id)
        markup = types.ReplyKeyboardMarkup(row_width=1)
        for a in activities:
            markup.add(a)
        markup.add(DONE_MENU)
        bot.send_chat_action(message.chat.id, STATUS_TYPING)
        replied_message = bot.send_message(chat_id, "Select which activities you want to keep:", reply_markup=markup)
        bot.register_next_step_handler(replied_message, _filter)
        end_processing(message)
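Example 1 above (and Example 13 further down) drives a multi-step conversation with pyTelegramBotAPI's register_next_step_handler, which queues a callback for the next message from the same chat. A minimal, self-contained sketch of that pattern, with a placeholder token and handler names that are not taken from the examples:

import telebot

bot = telebot.TeleBot("YOUR_API_TOKEN")  # placeholder token

@bot.message_handler(commands=["start"])
def ask_name(message):
    # The user's next message in this chat will be routed to _on_name.
    sent = bot.send_message(message.chat.id, "What's your name?")
    bot.register_next_step_handler(sent, _on_name)

def _on_name(message):
    bot.send_message(message.chat.id, "Hello, " + message.text + "!")

bot.infinity_polling()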
Example 2
def alpha_miner(message):
    if check_registration(message):
        if start_processing(message): return
        tracker.track(message.chat.id, "alpha")
        bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
        model = pm.bot_alpha_miner(message.chat.id)
        bot.send_photo(message.chat.id, open(model, "rb"))
        end_processing(message)
Example 3
def dependency_graph(message):
    if check_registration(message):
        if start_processing(message): return
        tracker.track(message.chat.id, "dfg")
        bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
        model = pm.bot_dfg(message.chat.id)
        bot.send_photo(message.chat.id, open(model, "rb"))
        end_processing(message)
Example 4
def bot_inductive_miner(message):
    if check_registration(message):
        if start_processing(message): return
        tracker.track(message.chat.id, "im")
        pic_file = promexecutor.inductive_miner(PROM_LITE, message.chat.id)
        bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
        bot.send_photo(message.chat.id, open(pic_file, "rb"))
        end_processing(message)
Example 5
def revert_filter(message):
    if check_registration(message):
        if start_processing(message, no_positive_message=True): return
        chat_id = message.chat.id
        tracker.track(chat_id, "removefilters")
        pm.reset_filter(chat_id)
        bot.send_chat_action(chat_id, STATUS_TYPING)
        bot.send_message(chat_id, "I restored the log to its original form")
        end_processing(message)
Example 6
def relative_dotted_chart(message):
    if check_registration(message):
        if start_processing(message): return
        tracker.track(message.chat.id, "relativedottedchart")
        pic = rexecutor.run_r_code(R_SCRIPT,
                                   R_SCRIPTS_FOLDER + "relative_dotted_chart.R",
                                   pm.get_property(message.chat.id, "current_log"))
        bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
        bot.send_photo(message.chat.id, open(pic, "rb"))
        end_processing(message)
Example 7
def precedence_matrix(message):
    if check_registration(message):
        if start_processing(message): return
        tracker.track(message.chat.id, "resources")
        pic = rexecutor.run_r_code(R_SCRIPT,
                                   R_SCRIPTS_FOLDER + "resource_frequencies.R",
                                   pm.get_property(message.chat.id, "current_log"))
        bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
        bot.send_photo(message.chat.id, open(pic, "rb"))
        end_processing(message)
Example 8
    def get_img(self, imgs, fl_indexs):
        """Process images in a streaming fashion.

        Example
        :imgs = video[0:3]
        :fl_indexs = [0,2]
        :imgs = Process().get_img(imgs,fl_indexs)

        Args:
            imgs ([List]): list of images
            fl_indexs ([List]): for each image, the index of the frame it
                corresponds to in the video

        Returns:
            [List]: list of images filled in by the tracking algorithm
        """
        ffbbs, lfbbs = self._get_bboxs_from_imgs(imgs)
        (fhat_bbox, fhat_bbox_pro), (fperson_bbox, fperson_bbox_pro) = ffbbs
        (lhat_bbox, lhat_bbox_pro), (lperson_bbox, lperson_bbox_pro) = lfbbs
        flhat_bboxs = [fhat_bbox, lhat_bbox]
        flpsn_bboxs = [fperson_bbox, lperson_bbox]
        # From the bboxes detected in the first and last frames, use the
        # tracking algorithm to infer the bboxes of the intermediate frames
        psn_bboxs_ids = track(flpsn_bboxs, self.psn_tracker)
        hat_bboxs_ids = track(flhat_bboxs, self.hat_tracker)
        # Fill in the bboxes of the intermediate frames from the tracks
        psn_bboxs_index, all_psn_bbox_ids = fill(psn_bboxs_ids, fl_indexs)
        hat_bboxs_index, all_hat_bbox_ids = fill(hat_bboxs_ids, fl_indexs)
        # Pack the detected targets into object tuples
        psn_objects = [
            (all_psn_bbox_ids[bindex], all_psn_bbox_ids[bindex], psn_bboxs_index[bindex])
            for bindex in range(len(psn_bboxs_index) - 1)]
        hat_objects = [
            (all_hat_bbox_ids[bindex], all_hat_bbox_ids[bindex], hat_bboxs_index[bindex])
            for bindex in range(len(hat_bboxs_index) - 1)]
        # Draw the detection results on the frames
        origin_frames = draw_label1(
            imgs,
            all_psn_bbox_ids,
            lperson_bbox_pro,
            "no wear helmet",
            color_val(self.person_color),
            color_val(self.person_color))
        origin_frames = draw_label1(
            imgs,
            all_hat_bbox_ids,
            lhat_bbox_pro,
            'wear helmet',
            color_val(self.hat_color),
            color_val(self.hat_color))

        return origin_frames, psn_objects, hat_objects
Example 9
def hm(message):
    if check_registration(message):
        if start_processing(message): return
        tracker.track(message.chat.id, "hm")
        args = message.text.split()
        dep_threshold = 0.99
        if len(args) == 2:
            try:
                dep_threshold = float(args[1])
            except ValueError:
                pass
        models = pm.bot_hm(message.chat.id, dependency_threshold=dep_threshold)
        for m in models:
            bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
            bot.send_photo(message.chat.id, open(m, "rb"))
        end_processing(message)
Example 10
def describe_log(message):
    if check_registration(message):
        if start_processing(message): return
        tracker.track(message.chat.id, "describelog")
        description = pm.describe_log(message.chat.id)
        textual_description = "<b>Total number of traces:</b> " + str(description["traces"]) + "\n"
        textual_description += "<b>Activities with frequencies</b>:\n"
        for a in description["acts_freq"]:
            textual_description += " - " + a + ": " + str(description["acts_freq"][a]) + "\n"
        bot.send_chat_action(message.chat.id, STATUS_TYPING)
        bot.send_message(message.chat.id, textual_description, parse_mode="html")
        if description["case_duration"] is not None:
            bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
            bot.send_photo(message.chat.id, open(description["case_duration"], "rb"))
        if description["events_over_time"] is not None:
            bot.send_chat_action(message.chat.id, STATUS_UPLOAD_PICTURE)
            bot.send_photo(message.chat.id, open(description["events_over_time"], "rb"))
        end_processing(message)
Example 11
def location():
    # Store by location
    if request.method == 'POST':
        return to_json(tracker.track(request.values))

    # Query by location.  Returns active WORKER users only
    if request.method == 'GET':
        users = [clean_dict(user) for user in tracker.search(request.values)]
        return to_json(users)
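Example 11 looks like a Flask view that serves both writes (POST) and reads (GET) from a single endpoint. A hedged sketch of how it might be registered; the route path and app object are assumptions, and to_json, clean_dict, and tracker come from the surrounding project:

from flask import Flask

app = Flask(__name__)

# Hypothetical registration; the real route path is not shown in the excerpt.
app.add_url_rule("/location", view_func=location, methods=["GET", "POST"])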
Example 12
def process_video(model,
                  input_path,
                  output_path,
                  require_fps,
                  hat_color,
                  person_color,
                  fourcc='mp4v'):
    """Process a video and write the result to the given path.

    Arguments:
        model {torch.nn.Sequ} -- [the model to use]
        input_path {[str]} -- [path of the input video file]
        output_path {[str]} -- [path of the output video file]
        require_fps {[int]} -- [fps of the output video]
        fourcc {[str]} -- [codec OpenCV uses to write the file]
        hat_color {[str]} -- [color of the helmet boxes]
        person_color {[str]} -- [color of the head boxes]
    """
    video = mmcv.VideoReader(input_path)
    # Initialize the head tracker
    psn_tracker = Tracker()
    resolution = (video.width, video.height)
    video_fps = video.fps
    #ds = DetectionSifter(int(video_fps),osp.basename(args.input_path).split('.')[0],1,3,resolution,get_collection())
    if require_fps is None or require_fps > video_fps:
        require_fps = video_fps
    vwriter = cv2.VideoWriter(output_path, VideoWriter_fourcc(*fourcc),
                              require_fps, resolution)
    for frame in tqdm(video):
        # bbox: (hat_bbox, person_bbox)
        st = time.time()
        bboxs = inference_detector(model, frame)
        et = time.time()
        Loger.info('Detection took {0}s'.format(et - st))
        frame_result = get_result(frame,
                                  bboxs,
                                  class_names=model.CLASSES,
                                  auto_thickness=True,
                                  color_dist={
                                      'hat': 'green',
                                      'person': 'red'
                                  })
        # person_bboxs: (N, 5)
        person_bboxs = bboxs[1]
        # Keep only detections with confidence above 0.5 for tracking
        person_bboxs = person_bboxs[person_bboxs[:, 4] > 0.5]
        person_bboxs = np.expand_dims(person_bboxs, 0)
        person_bboxs_tracks = track(person_bboxs, psn_tracker)[0]
        #ds.add_object(person_bboxs_tracks,frame)
        vwriter.write(frame_result)
    #ds.clear()
    print('process finished')
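A hypothetical invocation of Example 12's process_video; the paths and colors are placeholders, and the model setup assumes MMDetection, as implied by the inference_detector call above:

from mmdet.apis import init_detector, inference_detector

# Placeholder config/checkpoint paths.
model = init_detector("config.py", "checkpoint.pth", device="cuda:0")
process_video(model,
              input_path="site_camera.mp4",
              output_path="site_camera_annotated.mp4",
              require_fps=None,  # fall back to the input video's fps
              hat_color="green",
              person_color="red")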
Example 13
def send_welcome(message):
    def _registration(message):
        if message.text == REGISTRATION_CODE:
            bot.send_chat_action(message.chat.id, STATUS_TYPING)
            bot.send_message(chat_id, "Excellent, thanks!")
            bot.send_message(chat_id, "Let me start our conversation by sharing a dummy log that you can use to test my capabilities...")
            bot.send_document(chat_id, open("logs/firstLog.xes", "rb"))
            copyfile("logs/firstLog.xes", pm.get_log_filename(chat_id))
            pm.set_property(chat_id, "current_log", pm.get_log_filename(chat_id))
            pm.set_property(chat_id, "log_original_name", "firstLog.xes")
            bot.send_message(chat_id, "If you want, you can also share another log with me, simply by uploading it here")
            pm.set_property(chat_id, "registered", True)
        else:
            bot.send_chat_action(message.chat.id, STATUS_TYPING)
            bot.reply_to(message, "I'm sorry, this license code is not correct \u2639")
            pm.set_property(chat_id, "registered", False)

    chat_id = message.chat.id
    tracker.track(chat_id, "start")
    bot.send_chat_action(message.chat.id, STATUS_TYPING)
    bot.send_message(chat_id, "Hi " + message.from_user.first_name + ", and welcome to the Process Mining Bot!")
    markup = types.ForceReply(selective=False)
    license = bot.send_message(chat_id, "I need to know your license code: (a valid code is \""+ REGISTRATION_CODE +"\")", reply_markup=markup)
    bot.register_next_step_handler(license, _registration)
Example 14
def move():
    x, y = tracker.track()

    # "range" here presumably names a module-level threshold defined elsewhere
    # (it shadows Python's built-in range).
    if x > range:
        if x > 0:
            value = '+'
        else:
            value = '-'
        device.adjust_direction('x', value)

    if y > range:
        if y > 0:
            value = '+'
        else:
            value = '-'

        device.adjust_rotor(value)
Example 15
def new_log_file(message):
    chat_id = message.chat.id
    if check_registration(message):
        if message.document.mime_type == "application/xml" and message.document.file_name.split(".")[-1] == "xes":
            if int(message.document.file_size) <= (MAX_FILE_SIZE_IN_MB * 1000000):
                tracker.track(message.chat.id, "newXesLog")
                file_info = bot.get_file(message.document.file_id)
                file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(API_TOKEN, file_info.file_path))
                pm.set_log(message.chat.id, file.content, message.document.file_name)
                bot.send_chat_action(chat_id, STATUS_TYPING)
                bot.send_message(chat_id, "Thanks, I received the new log!")
            else:
                bot.send_chat_action(chat_id, STATUS_TYPING)
                bot.reply_to(message, "Ops, currently, I support only files smaller than " + str(MAX_FILE_SIZE_IN_MB) + "MB")
        elif message.document.mime_type == "application/zip" and message.document.file_name.split(".")[-1] == "zip":
            if int(message.document.file_size) <= (MAX_FILE_SIZE_IN_MB * 1000000):
                tracker.track(message.chat.id, "newZipLog")
                file_info = bot.get_file(message.document.file_id)
                file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(API_TOKEN, file_info.file_path))
                new_file, tmp_zip = tempfile.mkstemp()
                with open(tmp_zip, 'wb') as f:
                    f.write(file.content)
                is_encrypted = False
                zf = zipfile.ZipFile(tmp_zip)
                if len(zf.namelist()) != 1 or zf.namelist()[0].split(".")[-1] != "xes":
                    bot.send_chat_action(message.chat.id, STATUS_TYPING)
                    bot.reply_to(message, "Ops, the <code>.zip</code> file should contain just one file, <code>.xes</code> file!", parse_mode="html")
                else:

                    for z_info in zf.infolist():
                        is_encrypted = z_info.flag_bits & 0x1
                    if is_encrypted:
                        def _pwd(message_rep):
                            try:
                                file_content_2 = zf.read(zf.namelist()[0], pwd=message_rep.text.encode('cp850','replace'))
                                pm.set_log(message.chat.id, file_content_2, zf.namelist()[0])
                                bot.send_chat_action(message.chat.id, STATUS_TYPING)
                                bot.send_message(message.chat.id, "Thanks, I received the new log!")
                            except RuntimeError as e:
                                bot.send_chat_action(message.chat.id, STATUS_TYPING)
                                bot.send_message(message.chat.id, str(e))
                                bot.send_chat_action(chat_id, STATUS_TYPING)
                                markup = types.ForceReply(selective=False)
                                pwd_msg = bot.send_message(chat_id, "Enter the password for the <code>.zip</code> file:", reply_markup=markup, parse_mode="html")
                                bot.register_next_step_handler(pwd_msg, _pwd)

                        bot.send_chat_action(chat_id, STATUS_TYPING)
                        markup = types.ForceReply(selective=False)
                        pwd_msg = bot.send_message(chat_id, "The <code>.zip</code> file is encrypted, please write me the password:"******"html")
                        bot.register_next_step_handler(pwd_msg, _pwd)

                    else:
                        file_content = zf.read(zf.namelist()[0])
                        pm.set_log(message.chat.id, file_content, zf.namelist()[0])
                        bot.send_chat_action(message.chat.id, STATUS_TYPING)
                        bot.send_message(message.chat.id, "Thanks, I received the new log!")
            else:
                bot.send_chat_action(message.chat.id, STATUS_TYPING)
                bot.reply_to(message, "Ops, currently, I support only files smaller than " + str(MAX_FILE_SIZE_IN_MB) + "MB")
        else:
            bot.send_chat_action(message.chat.id, STATUS_TYPING)
            bot.reply_to(message, "Currently, I support only <code>.xes</code> or <code>.zip</code> files, sorry!", parse_mode="html")
Example 16
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})

            # Process Outputs
            squeezed_boxes = np.squeeze(boxes)
            squeezed_classes = np.squeeze(classes).astype(np.int32)
            squeezed_scores = np.squeeze(scores)

            # Process Frames (generate Object ID) and Track Frames
            print('======= Frame ' + str(frame_count) + ' =========')
            current_frame = tracker.make_frame_info(frame_count,
                                                    squeezed_boxes,
                                                    squeezed_classes,
                                                    squeezed_scores,
                                                    image_np.shape, image_np)
            current_frame = tracker.track(past_frames, current_frame)
            past_frames = tracker.append_current_to_past_frame_info(
                past_frames, current_frame)
            print('(---- End of Frame ' + str(frame_count) + ' ----) \n')

            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                squeezed_boxes,
                squeezed_classes,
                squeezed_scores,
                category_index,
                use_normalized_coordinates=True,
                line_thickness=4)

            # save path to capture raw frames
Example 17
def plot_boxes(i,
               frame_no,
               img,
               boxes,
               class_names,
               plot_labels=True,
               color=None):
    queue_counter = 0
    # Define a tensor used to set the colors of the bounding boxes
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0],
                                [1, 1, 0], [1, 0, 0]])

    # Define a function to set the colors of the bounding boxes
    def get_color(c, x, max_val):
        ratio = float(x) / max_val * 5
        i = int(np.floor(ratio))
        j = int(np.ceil(ratio))

        ratio = ratio - i
        r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]

        return int(r * 255)

    # Get the width and height of the image
    width = img.shape[1]
    height = img.shape[0]

    # Create a figure and plot the image
    fig, a = plt.subplots(1, 1)
    a.imshow(img)
    #    cv2.imwrite('D:\HeadCount\YOLO-Object-Detection-master\YOLO-Object-Detection-master\Output\out' +str(frame_no)+'.jpeg', img)

    # Plot the bounding boxes and corresponding labels on top of the image
    for i in range(len(boxes)):
        rects = []

        # Get the ith bounding box
        box = boxes[i]
        c = box[6]
        if class_names[c] == 'person':
            #            print('true: ',class_names[c])
            # Get the (x,y) pixel coordinates of the lower-left and lower-right corners
            # of the bounding box relative to the size of the image.
            x1 = int(np.around((box[0] - box[2] / 2.0) * width))
            y1 = int(np.around((box[1] - box[3] / 2.0) * height))
            x2 = int(np.around((box[0] + box[2] / 2.0) * width))
            y2 = int(np.around((box[1] + box[3] / 2.0) * height))

            # Set the default rgb value to red
            rgb = (1, 0, 0)

            # Use the same color to plot the bounding boxes of the same object class
            if len(box) >= 7 and class_names:
                cls_conf = box[5]
                cls_id = box[6]
                classes = len(class_names)
                offset = cls_id * 123457 % classes
                red = get_color(2, offset, classes) / 255
                green = get_color(1, offset, classes) / 255
                blue = get_color(0, offset, classes) / 255

                # If a color is given then set rgb to the given color instead
                if color is None:
                    rgb = (red, green, blue)
                else:
                    rgb = color

            # Calculate the width and height of the bounding box relative to the size of the image.
            width_x = x2 - x1
            #        width_y = y1 - y2
            width_y = -200

            #        print(width_y)

            #
            #        check_point = consider.check(0, 270, 530, 310, 530, 170,
            #                    0, 200, x1, y2)

            check_point = consider.check(0, 150, 576, 208, 576, 300, 0, 215,
                                         x1, y2)  # 99%
            #        save_point = 0

            if check_point == 1:  #.....................
                #                print(x1,y1," ",x2,y2)
                queue_counter += 1
                rect = patches.Rectangle((x1, y2),
                                         width_x,
                                         width_y,
                                         linewidth=2,
                                         edgecolor=rgb,
                                         facecolor='none')

                rects.append(box)

                track_no = tracker.track(x1, x2, y1, y2, frame_no)
                # Draw the bounding box on top of the image
                a.add_patch(rect)  #.........................
                #    centroid = centroids_tracker.CentroidTracker()
                #    trackable_object = centroid.updates(rects)
                #    print(trackable_object)
                label = str(track_no)
                lxc = (img.shape[1] * 0.266) / 100
                lyc = (img.shape[0] * 1.180) / 100
                a.text(x1 + lxc,
                       y1 - lyc,
                       label,
                       fontsize=24,
                       color='k',
                       bbox=dict(facecolor=rgb, edgecolor=rgb, alpha=0.8))

    print('person standing in queue: ', queue_counter)
    framename = 'output' + str(frame_no)
    dataentry.datawrite(framename, queue_counter)
    plt.savefig(r'D:\Queue Managment v1\Output\out' + str(frame_no) + '.png')
    im = cv2.imread(r'D:\Queue Managment v1\Output\out' + str(frame_no) + '.png')
    imS = cv2.resize(im, (800, 700))
    cv2.imshow('output', imS)
    #    cv2.imwrite('OutputTest', im)
    cv2.waitKey(2)
Example 18
    def run(self):
        cap = fc2.Context()
        cap.connect(*cap.get_camera_from_index(0))
        cap.set_video_mode_and_frame_rate(fc2.VIDEOMODE_640x480Y8,
                                          fc2.FRAMERATE_15)
        m, f = cap.get_video_mode_and_frame_rate()
        p = cap.get_property(fc2.FRAME_RATE)
        cap.set_property(**p)
        cap.start_capture()

        # "r", "mask", "q", "config" and "globalZeroTime" are assumed to be
        # defined at module scope (e.g. "r" looks like an ROI rectangle of the
        # form (x, y, w, h) selected elsewhere); they are not in this excerpt.
        cy = r[1] + int(r[3] / 2)
        cx = r[0] + int(r[2] / 2)

        x = r[0]
        y = r[1]
        w = r[2]
        h = r[3]

        photos = []
        slidingWindow = []

        for i in range(self.nFrames):
            img = fc2.Image()
            cap.retrieve_buffer(img)
            frame = np.array(img)

            photos.append(frame)

            smallFrame = frame[y:y + h, x:x + w]
            smallFrame = np.multiply(smallFrame, mask)
            smallFrame[np.where(smallFrame == 0)] = 255

            detect, trackedFrame, pos = track(smallFrame, 120, config.flySize)

            if pos[1] is None:
                slidingWindow.append(np.mean(slidingWindow))
            else:
                slidingWindow.append(pos[1])

            cv2.imshow("Frame", trackedFrame)

            #cv2.imwrite(self.saveDir+'/{}.jpg'.format(i), frame)

            # Code for walking direction
            # 0 = No walking
            # 1 = Walking towards top
            # 2 = Walking towards bottom

            if i > 2:
                slidingWindow.pop(0)
                d = np.diff(slidingWindow)
                da = np.mean(d)

                if da > 2:
                    signal = 2
                elif da < -2:
                    signal = 1
                else:
                    signal = 0
            else:
                signal = 0

            logging.debug('{},{},{},{},{}'.format(
                str(time.time() - globalZeroTime), 'reinforcement', str(i),
                str(pos[0]), str(pos[1])))

            if not q.full():
                # This will be set conditionally by the tracker
                item = signal
                q.put(item)
                #logging.debug('Putting {} in queue'.format(signal))

            # Video keyboard interrupt
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        if self.save_dir is not None:
            with open(self.save_dir + '/photos.pkl', 'wb') as f:
                pickle.dump(photos, f)
        else:
            pass

        cap.stop_capture()
        cap.disconnect()
        cv2.destroyAllWindows()
        cv2.waitKey()

        return
Example 19
# Import libraries

from tracker import track
import time

# Run forever
while True:
    # Call the track function from tracker.py
    track()
    # Delay, e.g. 60 * 60 * 24 seconds to run the script daily
    time.sleep(60 * 60 * 24)
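Because time.sleep(60 * 60 * 24) drifts (each cycle lasts a day plus however long track() itself takes), a variant that anchors each run to wall-clock time may be preferable; a small sketch using only the standard library:

import time

from tracker import track

INTERVAL = 60 * 60 * 24  # one day

next_run = time.time()
while True:
    track()
    next_run += INTERVAL
    # Sleep only for the remaining time, so track()'s own runtime
    # does not shift the schedule.
    time.sleep(max(0, next_run - time.time()))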
Example 20
def start_enter(event):
    tracker.track(video_path.get(), int(contour_size_thresh.get()),
                  int(dist_thresh.get()), int(penalty_thresh.get()),
                  output_dir.get(), debug.get())
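The start_enter(event) signature in Example 20 suggests a Tkinter event callback. A hypothetical wiring sketch; the widget names mirror the example's .get() calls but are assumptions about the surrounding GUI:

import tkinter as tk

root = tk.Tk()
video_path = tk.StringVar(root)
contour_size_thresh = tk.StringVar(root, value="50")
dist_thresh = tk.StringVar(root, value="30")
penalty_thresh = tk.StringVar(root, value="10")
output_dir = tk.StringVar(root)
debug = tk.BooleanVar(root)

entry = tk.Entry(root, textvariable=video_path)
entry.pack()
entry.bind("<Return>", start_enter)  # pressing Return fires start_enter(event)
root.mainloop()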