Example #1
def create_gif(input_folder_path, output_path):
    """Build an animated GIF from every second image in input_folder_path."""
    imgs_lst = sorted(os.listdir(input_folder_path))  # sort so frames are in order
    frames = []
    for i in range(0, len(imgs_lst), 2):
        image_file = os.path.join(input_folder_path, imgs_lst[i])

        img = cv2.imread(image_file)

        size = (640, 480)
        img = cv2.resize(img, size)

        cv2.namedWindow("img", cv2.WINDOW_NORMAL)
        cv2.imshow("img", img)
        cv2.waitKey(1)

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        im_pil = Image.fromarray(img)
        frames.append(im_pil)

        perc = i / len(imgs_lst)
        progress_bar.update_progress_bar(perc, "time")

    cv2.destroyAllWindows()

    frames[0].save(output_path,
                   format='GIF',
                   append_images=frames[1:],
                   save_all=True,
                   duration=60,
                   loop=0)
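Every example here reports progress through a project-local progress_bar module (aliased as pb in Examples #2 and #3); the module itself is not shown. A minimal sketch consistent with the two calling styles seen in these examples (a 0-1 fraction plus an optional text label, or a position plus a total) might look like the following; the signature and rendering are assumptions, not the project's actual implementation.

import sys

def update_progress_bar(progress, total_or_label=None, width=40):
    """Hypothetical stand-in for progress_bar.update_progress_bar."""
    if isinstance(total_or_label, (int, float)) and total_or_label:
        # Called as (position, total)
        fraction = progress / float(total_or_label)
        label = "{}/{}".format(progress, total_or_label)
    else:
        # Called as (fraction, optional label)
        fraction = float(progress)
        label = total_or_label or ""
    fraction = min(max(fraction, 0.0), 1.0)
    filled = int(width * fraction)
    sys.stderr.write("\r[{}{}] {:5.1f}% {}".format("#" * filled,
                                                   "-" * (width - filled),
                                                   fraction * 100, label))
    sys.stderr.flush()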
Example #2
def main(args):
    """ """
    # Parse PCAPs to skip
    to_skip_md5 = []
    if args.skip:
        with open(args.skip, 'r') as fr:
            for pos, line in enumerate(fr):
                fpcap_md5 = json.loads(line.strip('\n'))[1]
                to_skip_md5.append(fpcap_md5)

    # Prepare list of pcap files to be processed
    if args.dir:
        pcap_files = glob.glob(os.path.join(args.dir, '*.pcap'))
    elif args.pcap:
        pcap_files = [args.pcap]
    else:
        sys.stderr.write('[-] Nothing to process: provide a pcap directory or file\n')
        exit(1)
    total_pcaps_cnt = len(pcap_files)

    if not os.path.isdir(TMP_DIR):
        os.mkdir(TMP_DIR)

    if not os.path.isdir(args.dcerts):
        os.mkdir(args.dcerts)

    certs_dir = os.path.abspath(args.dcerts)

    # Main loop to process each pcap file
    pb.update_progress_bar(0, total_pcaps_cnt)
    with open(args.ofile, 'a') as fw:
        for pos, pcap_file in enumerate(pcap_files):
            if pos and pos % 50 == 0:
                pb.update_progress_bar(pos, total_pcaps_cnt)

            pcap_fname = os.path.basename(pcap_file)
            # Get the pcap hash from the file name, if applicable
            fhash, fhash_type, run_id = parse_pcap_name(pcap_fname)
            pcap_md5 = md5_file(pcap_file)
            if pcap_md5 in to_skip_md5:
                sys.stderr.write("[+] Skipping file: {}\n".format(pcap_fname))
                continue

            sys.stderr.write("[+] Processing file: {}\n".format(pcap_fname))

            try:
                log_entries = run_bro(pcap_file, certs_dir)
                fw.write('{}\n'.format(
                    json.dumps([pcap_fname, pcap_md5, log_entries])))
            except Exception as e:
                sys.stderr.write('\t[-] Error: {}\n'.format(repr(e)))
    pb.update_progress_bar(pos, total_pcaps_cnt)
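The md5_file helper used above is not part of the example. A minimal sketch, assuming it simply returns the hex MD5 digest of the file's contents (which is what the skip-list comparison implies), could be:

import hashlib

def md5_file(path, chunk_size=65536):
    """Hypothetical md5_file: hex MD5 digest of a file, read in chunks."""
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()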
Example #3
def main(args):
    """ """
    cwd = os.getcwd()

    if not os.path.isdir(args.dcerts):
        os.mkdir(args.dcerts)

    certs_dir = os.path.abspath(args.dcerts)

    # Move to the logs dir
    os.chdir(args.dir)

    # Prepare list of pcap files to be processed
    logs = glob.glob('*.log')
    if not logs:
        sys.stderr.write('[-] No Bro logs found.\n')
        exit(1)

    # Total operations to be done (one extra for the final certificate-copy step)
    total_logs_cnt = len(logs) + 1

    # Unbuffered text mode is not available in Python 3; the explicit flush()
    # calls below keep the output file up to date.
    with open(os.path.join(cwd, args.ofile), 'w') as fw:
        first = True
        #TODO: Transform input dir name into hash
        fw.write('["{}", "NOMD5", '.format(args.dir))
        fw.write('{')
        for pos, log in enumerate(logs):
            pb.update_progress_bar(pos, total_logs_cnt)
            # This is a log file used internally by our Bro script; Ignore it
            if log == 'dns_cache.log':
                continue

            # In optimize mode (useful for large captures), only parse the two SSL logs
            if args.optimize and log != 'ssl_dm.log' and log != 'ssl_certs.log':
                continue

            # When Bro fails, our custom logs are created but they are empty
            if not os.path.getsize(log):
                sys.stderr.write('[-] Log is empty: {}\n'.format(log))
                continue

            sys.stderr.write("[+] Parsing log file: {}\n".format(log))

            try:
                # Get all records from this log
                records = parse_log(os.path.abspath(log))

                # Store records for this log in JSON format
                if records:

                    if not first:
                        fw.write(',')

                    fw.write('"' + log + '":[')
                    for n, entry in enumerate(records):
                        if n:
                            fw.write(',')
                        json.dump(entry._asdict(), fw)
                        fw.flush()
                    fw.write(']')
                    first = False

                else:
                    sys.stderr.write(
                        '[-] No records for log: {}\n'.format(log))

            except Exception as e:
                sys.stderr.write('\t[!] Error: {}\n'.format(repr(e)))

            fw.flush()

        fw.write('}]\n')
        pb.update_progress_bar(pos, total_logs_cnt - 1)

    # Store certificates in certs directory
    os.system("cp *.pem {}/ 2> /dev/null".format(certs_dir))

    pb.update_progress_bar(pos, total_logs_cnt)

    # Return to current working directory
    os.chdir(cwd)
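parse_log is another project-specific helper; the entry._asdict() call above suggests it returns namedtuple records. A rough sketch for a tab-separated Bro log with a '#fields' header line (an assumption about the on-disk layout) might be:

import collections

def parse_log(log_path):
    """Hypothetical parse_log: read a tab-separated Bro log into namedtuples."""
    records = []
    record_cls = None
    with open(log_path) as fh:
        for line in fh:
            line = line.rstrip('\n')
            if line.startswith('#fields'):
                # Field names follow '#fields', tab-separated; make them valid identifiers
                field_names = [name.replace('.', '_') for name in line.split('\t')[1:]]
                record_cls = collections.namedtuple('LogEntry', field_names)
            elif line.startswith('#') or not line:
                continue  # other '#' lines are metadata (e.g. '#close')
            elif record_cls is not None:
                values = line.split('\t')
                if len(values) == len(record_cls._fields):
                    records.append(record_cls(*values))
    return records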
Example #4
def run_tracker_wrapper(tracker_types, run_images_from_folder,
                        video_or_folder_name, frame_to_start, object_name,
                        output_path, first_tiral):
    """ The function runs a few opencv2 trackers on a video
    (which has previously been split into frames)
        then it saves a video of the results

        Parameters
        ----------
        tracker_types : list
                    a list of the tracker types to run
                    the names should match the types specified in create_tracker funciton
                    example: ['MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'MOSSE', 'CSRT']
        run_images_from_folder : bool
                    if true run on frames what were extraacted from a video, if false run movie.
        video_or_folder_name : str
                    Path to a folder contains videos frames (assumed to be in YUV format).
                    example: 'C:\\ExamplesOfInputs4Testing\\vot2016Simple\\blanket_yuv'
        frame_to_start : int
                    frame to start tracking
                    example: 0
        object_name : str
                    name to add to end of outputs
                    example: 'obj1'
       """

    # Create MultiTracker object
    try:
        multi_tracker = cv2.MultiTracker_create()
    except AttributeError:
        multi_tracker = cv2.legacy.MultiTracker_create()

    # load first frame
    if run_images_from_folder:
        video = []
        video_files = list_images_in_path(video_or_folder_name)
        if video_files == []:
            print("bad file format for imgs in path!, no files found")
            sys.exit()
    else:
        video = cv2.VideoCapture(video_or_folder_name)
        # Exit if video not opened.
        if not video.isOpened():
            print("Could not open video")
            sys.exit()
        # Clear these so the frame-folder branches below are skipped
        run_images_from_folder = []
        video_files = []

    ok, frame, f_name = load_image_from_file_or_video(run_images_from_folder,
                                                      video_files,
                                                      frame_to_start, video)
    if not ok:
        print('Could not load the initial frame via load_image_from_file_or_video')
        sys.exit()
# =============================================================================
#     # resize frame if necessary
#     shape_frame = frame.shape
#     if shape_frame[1] > 1920:
#         AR_frame = float(shape_frame[1]) / float(shape_frame[0])
#         width_frame = 1920
#         height_frame = round(float(width_frame) / AR_frame)
#     else:
#         width_frame = shape_frame[1]
#         height_frame = shape_frame[0]
# =============================================================================
    # Get initial bounding boxes from user input
    init_bboxs = get_initial_bounding_boxs(frame)
    cv2.destroyAllWindows()

    ## If no ROIs were selected break off
    if len(init_bboxs) == 0:
        print("no ROI selected!")
        return

    ## Initialize MultiTracker
    for tracker_type in tracker_types:
        for bbox in init_bboxs:
            print(tracker_type)
            multi_tracker.add(create_tracker(tracker_type), frame, bbox)

    # Get colors to display on screen
#    colors = get_box_colors()

    # Set video output for saving results
    video_out = create_video_results(video_or_folder_name, frame, object_name,
                                     output_path)

    # save current rois from all trackers to txt files
    path_tracker_rois = save_result_rois(video_or_folder_name, [], init_bboxs,
                                         tracker_types, True, object_name,
                                         f_name, output_path, first_tiral)
    i_frame = frame_to_start
    # Main tracking loop; video_files is only populated in frame-folder mode
    while i_frame < len(video_files):
        # Auto tracking
        i_frame, k = tracker_loop(i_frame,
                                  run_images_from_folder,
                                  video_files,
                                  video,
                                  multi_tracker,
                                  video_or_folder_name,
                                  tracker_types,
                                  object_name,
                                  output_path,
                                  first_tiral,
                                  video_out,
                                  colors=get_box_colors())
        # Esc
        if k == 27:
            break
        # Manual tracking
        elif k == 112:  # p key
            if run_images_from_folder:
                i_frame, manual_k = manual_tracking.loop_through_imgs(
                    video_or_folder_name, path_tracker_rois, i_frame,
                    video_out)
            else:
                print("Nope... can't track manualy with video")
                break
            # back to Auto tracking
            if manual_k == 27:  # Esc from manual tracking: resume auto tracking
                try:
                    multi_tracker = cv2.MultiTracker_create()
                except AttributeError:
                    multi_tracker = cv2.legacy.MultiTracker_create()

                ok, frame, f_name = load_image_from_file_or_video(
                    run_images_from_folder, video_files, i_frame, video)
                # Get initial bounding box from user input
                init_bboxs = get_initial_bounding_boxs(frame)
                cv2.destroyAllWindows()

                ## If no ROIs were selected break off
                if len(init_bboxs) == 0:
                    print("no ROI selected!")
                    break

                # Re-Initialize MultiTracker
                for tracker_type in tracker_types:
                    for bbox in init_bboxs:
                        print(tracker_type)
                        multi_tracker.add(create_tracker(tracker_type), frame,
                                          bbox)

                path_tracker_rois = save_result_rois(video_or_folder_name, [],
                                                     init_bboxs,
                                                     tracker_types,
                                                     True,
                                                     object_name,
                                                     f_name,
                                                     output_path,
                                                     first_tiral=False)

    if video_files:  # avoid a division by zero when running from a video file
        percent_done = i_frame / len(video_files)
        progress_bar.update_progress_bar(percent_done, str(i_frame))
    print('\nreached end of video files: frame_number=' + str(i_frame))

    video_out.release()
    cv2.destroyAllWindows()
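create_tracker, referenced in the docstring and the initialization loops above, is not shown. A plausible sketch, assuming it maps the type strings from the docstring onto the corresponding OpenCV constructors (with the same cv2.legacy fallback the example uses for MultiTracker_create), is given below; with it, the wrapper might be called as run_tracker_wrapper(['KCF', 'CSRT'], True, 'C:\\ExamplesOfInputs4Testing\\vot2016Simple\\blanket_yuv', 0, 'obj1', output_path, True), where all arguments other than the folder path are illustrative.

import cv2

def create_tracker(tracker_type):
    """Hypothetical create_tracker: map a type name to an OpenCV tracker object."""
    names = {'MIL': 'MIL', 'KCF': 'KCF', 'TLD': 'TLD',
             'MEDIANFLOW': 'MedianFlow', 'MOSSE': 'MOSSE', 'CSRT': 'CSRT'}
    constructor = 'Tracker{}_create'.format(names[tracker_type])
    try:
        return getattr(cv2, constructor)()
    except AttributeError:
        # Newer OpenCV builds expose these constructors under cv2.legacy
        return getattr(cv2.legacy, constructor)()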
Example #5
def tracker_loop(
    frame_to_start,
    run_images_from_folder,
    video_files,
    video,
    multi_tracker,
    video_or_folder_name,
    tracker_types,
    object_name,
    output_path,
    first_tiral,
    # first_tiral selects between "w" (first trial) and "a" (append) when writing results
    video_out,
    colors=None):
    # Evaluate get_box_colors() lazily rather than once at function-definition time
    if colors is None:
        colors = get_box_colors()
    str_img_time = img_procesing.get_time(video_files[0])
    # loop through frames
    i_frame = frame_to_start
    print(i_frame)
    k = 0
    while True:
        try:
            ok, frame, f_name = load_image_from_file_or_video(
                run_images_from_folder, video_files, i_frame, video)
            # If the ok came back False, we didn't load an img
            if not ok:
                k = 27
                break
            # Start timer
            timer = cv2.getTickCount()
            # get updated location of objects in subsequent frames
            # start_time = time.time()  # start time of the loop
            ok, boxes = multi_tracker.update(frame)
            # If tracking is lost, return the 'p' key code so the caller can
            # fall back to manual tracking
            if not ok:
                print("\nLost it at -", i_frame)
                return i_frame, 112

            # save current rois from all trackers to txt files
            path_tracker_rois = save_result_rois(video_or_folder_name, boxes,
                                                 [], tracker_types, False,
                                                 object_name, f_name,
                                                 output_path, first_tiral)

            frame_resized = frame
            #            frame_resized = lab_filter(frame_resized)
            #            frame_resized = cv2.resize(frame, (width_frame, height_frame))
            frame_with_box = draw_bounding_box(
                frame_resized,
                boxes,
                ok,
                timer,  #ground_truth_bbox,
                tracker_types,
                colors,
                i_frame,
                path_tracker_rois)

            t = img_procesing.get_time(f_name)
            dt = img_procesing.get_time_delta(str_img_time, t)
            img_procesing.text_on_img(frame_with_box, dt)

            cv2.namedWindow('Tracking', cv2.WINDOW_NORMAL)
            cv2.imshow("Tracking", frame_with_box)

            video_out.write(frame_with_box)

            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:  # Esc key
                print("\nCaught ESC")
                break
            if k == 112:  # p key
                print("\nPaused, go do some manual work!\n last one was",
                      i_frame)
                break

            # Update progress bar
            if (i_frame % 10) == 0 or i_frame == 1:
                percent_done = i_frame / len(video_files)
                progress_bar.update_progress_bar(percent_done, str(i_frame))

            i_frame += 1
        except KeyboardInterrupt:
            print("\nCaught Keyboard Interrupt")
            print(i_frame)
            k = 27
            break

#        except ValueError:
#            print("asd")
#            break
    return i_frame, k
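load_image_from_file_or_video, used throughout Examples #4 and #5, is also project-specific. Judging from the call sites it returns an (ok, frame, frame_name) tuple and supports both a list of image files and an open cv2.VideoCapture; a rough sketch under those assumptions:

import os
import cv2

def load_image_from_file_or_video(run_images_from_folder, video_files, i_frame, video):
    """Hypothetical loader returning (ok, frame, frame_name) for either input mode."""
    if run_images_from_folder:
        # Frame-folder mode: index into the list of image paths
        if i_frame >= len(video_files):
            return False, None, ''
        f_name = video_files[i_frame]
        frame = cv2.imread(f_name)
        return frame is not None, frame, os.path.basename(f_name)
    # Video mode: read the next frame from the open VideoCapture
    ok, frame = video.read()
    return ok, frame, 'frame_{:06d}'.format(i_frame)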
Example #6
def create_video(input_folder_path, outvid_path, fps):
    """
    create video from images in input_folder_path
    fps needs to be a float type!
    """
    imgs_lst = sorted(os.listdir(input_folder_path))  # sort so frames are written in order
    # =============================================================================
    #     ## get size from first img
    #     ## there might be a limit to what your video player can show
    #     ## so we half it (for now)
    #     image0 = input_folder_path +"\\"+ imgs_lst[0]
    #     img0 = cv2.imread(image0)
    #     size = (int(img0.shape[1]/2), int(img0.shape[0]/2))
    #     size = ((img0.shape[1]), (img0.shape[0]))
    #     size = (640,480)
    # =============================================================================
    # size = (1980,1080)
    size = (1080, 720)
    # size = (640,480)

    ## set params for the vid_output
    is_color = True
    #    fourcc = cv2.VideoWriter_fourcc(*"XVID") ## .avi
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')  ## .mp4
    vid = cv2.VideoWriter(outvid_path, fourcc, fps, size, is_color)

    try:
        mean_history = []
        #        frames = []            ## for gif saving
        # Only use the first 600 images (a step of 3 can be added to skip frames)
        for i in range(len(imgs_lst[:600])):
            image_file = os.path.join(input_folder_path, imgs_lst[i])
            img = cv2.imread(image_file)
            #            print(img.shape)

            ## skip dark imgs
            #            if img.mean()<10:
            #                continue

            if img is None:  # cv2.imread returns None for unreadable files
                break


# ========== if anything should be done with img enter code here ==============
#            img = img_procesing.rotate_img(img,-90)
#
#            dots_img = remove_blue(img)
#            img = np.concatenate((img, dots_img), axis=1) ## add next to each other
#
#            size = (int(img.shape[1]/2), int(img.shape[0]/2))

#            flipHorizontal = cv2.flip(originalImage, 1) ## ??
#            print(size[0], size[1])
#            print(size[0]/size[1])
#            break

#            img = cv2.fastNlMeansDenoisingColored(img, None, 15, 10, 7, 21)
#            img_procesing.text_on_img(img, img_procesing.get_time(image_file)) ## add time stamp
#
#            img_procesing.text_on_img(img, "\n" + image_file)
#            img_procesing.text_on_img(img, "\n" + image_file.split("\\")[-1])
#            print(image_file.split("\\"))

            # Draw tracked points from the (optional) global dfs DataFrame

            try:
                xsum = 0
                ysum = 0
                for j in range(0, len(dfs.iloc[i]), 2):
                    try:
                        round_x = round(dfs.iloc[i][j])
                        round_y = round(dfs.iloc[i][j + 1])
                        xsum += dfs.iloc[i][j]
                        ysum += dfs.iloc[i][j + 1]
                        cv2.circle(img, (round_x, round_y), 2, (0, 0, 255), -1)
                    except ValueError:
                        img_procesing.text_on_img(img,
                                                  "lost point...",
                                                  dx=20,
                                                  dy=20)
                        continue
                mean_x = round(xsum / (len(dfs.iloc[i]) / 2))
                mean_y = round(ysum / (len(dfs.iloc[i]) / 2))
                mean_history.append((mean_x, mean_y))
                for dot in mean_history[-20:]:
                    cv2.circle(img, dot, 2, (0, 255, 255), -1)
            except NameError:
                # dfs was never loaded, so skip drawing the tracked points
                pass
            size = (1080, 720)  ##(3145, 1016)
            img = cv2.resize(img, size)
            # =============================================================================

            ## show img while processing
            cv2.namedWindow("img", cv2.WINDOW_NORMAL)
            cv2.imshow("img", img)
            cv2.waitKey(1)

            ## write to video file
            vid.write(img)

            ## just for the progress bar:
            perc = i / len(imgs_lst)
            progress_bar.update_progress_bar(perc)

        progress_bar.update_progress_bar(1)

    except KeyboardInterrupt:
        print("\nKeyboard Interrupt...")

    vid.release()
    cv2.destroyAllWindows()
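A usage sketch for create_video; the paths are placeholders and, as the docstring notes, fps must be a float:

if __name__ == "__main__":
    # Hypothetical input folder and output file
    create_video("C:\\path\\to\\frames", "C:\\path\\to\\output.mp4", 30.0)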