Example #1
async def fly_garbage(canvas, column, garbage_frame, speed=0.5):
    """Animate garbage, flying from top to bottom. Сolumn position will stay same, as specified on start."""

    rows_number, columns_number = canvas.getmaxyx()

    column = max(column, 0)
    column = min(column, columns_number - 1)

    # Start the frame above the top border so that it appears
    # gradually instead of suddenly
    row_size, column_size = utils.get_frame_size(garbage_frame)
    row = -row_size + BORDER_WIDTH + 1
    obstacle = Obstacle(row, column, row_size, column_size)
    obstacles.append(obstacle)

    while obstacle.row < rows_number - BORDER_WIDTH * 2 - DERIVED_WINDOW_HEIGHT:
        if obstacle in obstacles_in_last_collisions:
            obstacles_in_last_collisions.remove(obstacle)
            coroutines.append(
                explode(canvas, obstacle.row + row_size // 2,
                        obstacle.column + column_size // 2))
            break
        utils.draw_frame(canvas, obstacle.row, obstacle.column, garbage_frame)
        await asyncio.sleep(0)
        utils.draw_frame(canvas,
                         obstacle.row,
                         obstacle.column,
                         garbage_frame,
                         negative=True)
        obstacle.row += speed
    obstacles.remove(obstacle)
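Most of the curses examples in this listing lean on two small helpers, get_frame_size and draw_frame (often imported from a utils module), which none of the snippets define. Here is a minimal sketch of what they presumably look like, inferred from the call sites; the projects' real implementations may differ in details.

def get_frame_size(text):
    """Return (rows, columns) of a multiline text frame."""
    lines = text.splitlines()
    return len(lines), max(len(line) for line in lines)


def draw_frame(canvas, start_row, start_column, text, negative=False):
    """Draw a text frame on the canvas; erase it when negative=True."""
    rows_number, columns_number = canvas.getmaxyx()
    for row, line in enumerate(text.splitlines(), round(start_row)):
        if row < 0 or row >= rows_number:
            continue
        for column, symbol in enumerate(line, round(start_column)):
            if column < 0 or column >= columns_number:
                continue
            if symbol == ' ':
                continue
            # curses raises an error when drawing in the bottom-right corner
            if row == rows_number - 1 and column == columns_number - 1:
                continue
            canvas.addch(row, column, symbol if not negative else ' ')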
Example #2
    def run(self):
        while True:
            if self.shared_variables.camera_capture.isOpened():

                # show frame
                if self.shared_variables.detection_result is not None:

                    draw_frame(self.shared_variables.frame,
                               self.shared_variables.frame_size,
                               self.shared_variables.detection_result,
                               self.shared_variables.class_names,
                               self.shared_variables.model_size)

                    cv2.imshow("YOLO3 CPU", self.shared_variables.frame)

                # close program: poll the keyboard once per loop,
                # Esc or 'q' quits
                key = cv2.waitKey(1) & 0xFF
                if key == 27 or key == ord('q'):
                    break

        # terminate all threads
        self.shared_variables.tracking_running = False
        self.shared_variables.detection_running = False

        # stop camera
        self.shared_variables.camera_capture.release()
        cv2.destroyAllWindows()
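The run() loop above reads everything from self.shared_variables, but the container itself is not part of the example. A hypothetical minimal version with just the attributes the loop touches (the attribute names come from the call sites; everything else is an assumption):

import cv2


class SharedVariables:
    """State shared between the capture, detection and display threads."""

    def __init__(self, camera_index=0, model_size=(416, 416)):
        self.camera_capture = cv2.VideoCapture(camera_index)
        self.frame = None               # latest BGR frame from the camera
        self.frame_size = None          # (width, height) of the frame
        self.detection_result = None    # latest detections, if any
        self.class_names = []           # class labels used for drawing
        self.model_size = model_size    # model input size
        self.tracking_running = True    # flags polled by worker threads
        self.detection_running = True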
Example #3
async def fly_garbage(canvas, column, garbage_frame, speed=0.8):
    """Animate garbage, flying from top to bottom. Column position will stay the same, as specified on start."""
    global obstacles
    rows_number, columns_number = canvas.getmaxyx()

    column = max(column, 0)
    column = min(column, columns_number - 1)
    row = 0
    rows, columns = utils.get_frame_size(garbage_frame)
    obstacle = Obstacle(row, column, rows, columns)
    obstacles.append(obstacle)
    while row < rows_number:
        draw_frame(canvas, row, column, garbage_frame)
        obstacle.row = row

        await asyncio.sleep(0)

        draw_frame(canvas, row, column, garbage_frame, negative=True)
        row += speed
        obstacle.row += speed
        if obstacle in obstacles_in_last_collisions:
            explode_row = row + (rows / 2)
            explode_column = column + (columns / 2)
            await explode(canvas, explode_row, explode_column)
            obstacles.remove(obstacle)
            return
    obstacles.remove(obstacle)
Example #4
def handle_control_commands(canvas, frame, row_position, column_position,
                            limits, row_speed, column_speed):
    """Handle keyboard commands and change rocket coordinates."""

    row_direction, column_direction, space_pressed = \
        utils.read_controls(canvas)
    row_speed, column_speed = update_speed(row_speed, column_speed,
                                           row_direction, column_direction)

    utils.draw_frame(canvas,
                     row_position,
                     column_position,
                     frame,
                     negative=True)

    row_position += row_speed
    column_position += column_speed

    row_position = min(max(row_position, limits['min_row']), limits['max_row'])
    column_position = min(max(column_position, limits['min_column']),
                          limits['max_column'])

    if year > 2020 and space_pressed:
        fire_row = row_position
        fire_column = column_position + utils.get_frame_size(frame)[1] // 2
        coroutines.append(fire(canvas, fire_row, fire_column, rows_speed=-2))

    utils.draw_frame(canvas, row_position, column_position, frame)
    return row_position, column_position, row_speed, column_speed
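handle_control_commands relies on an update_speed helper that is not shown. A simplified sketch that matches the call signature; the fading factor and speed limits are illustrative assumptions, not the project's actual physics:

def update_speed(row_speed, column_speed, rows_direction, columns_direction,
                 row_speed_limit=2, column_speed_limit=2, fading=0.8):
    """Apply inertia-like fading, then accelerate in the pressed direction."""
    row_speed = row_speed * fading + rows_direction
    column_speed = column_speed * fading + columns_direction
    row_speed = max(-row_speed_limit, min(row_speed_limit, row_speed))
    column_speed = max(-column_speed_limit, min(column_speed_limit, column_speed))
    return row_speed, column_speed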
Example #5
def main(iou_threshold, confidence_threshold, input_names):
    global detection_result
    class_names = load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    model = Yolo_v3(n_classes=n_classes,
                    model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    inputs = tf.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
    detections = model(inputs, training=False)
    saver = tf.train.Saver(tf.global_variables(scope='yolo_v3_model'))

    with tf.Session() as sess:
        saver.restore(sess, './weights/model.ckpt')

        win_name = 'Video detection'
        cv2.namedWindow(win_name)
        cap = cv2.VideoCapture(input_names)
        frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                      cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
        fps = cap.get(cv2.CAP_PROP_FPS)
        if not os.path.exists('detections'):
            os.mkdir('detections')
        head, tail = os.path.split(input_names)
        name = './detections/' + tail[:-4] + '_yolo.mp4'
        out = cv2.VideoWriter(name, fourcc, fps,
                              (int(frame_size[0]), int(frame_size[1])))

        try:
            print("Show video")
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                resized_frame = cv2.resize(frame,
                                           dsize=_MODEL_SIZE[::-1],
                                           interpolation=cv2.INTER_NEAREST)
                detection_result = sess.run(
                    detections, feed_dict={inputs: [resized_frame]})
                draw_frame(frame, frame_size, detection_result, class_names,
                           _MODEL_SIZE)
                cv2.imshow(win_name, frame)
                out.write(frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        finally:
            cv2.destroyAllWindows()
            cap.release()
            out.release()
            print('Detections have been saved successfully.')
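The YOLOv3 examples in this listing start by calling load_class_names (or load_file) to read the class list. A minimal version, assuming the common one-name-per-line text format:

def load_class_names(file_name):
    """Return a list of class names read from a plain-text file."""
    with open(file_name, 'r') as f:
        return [line.strip() for line in f if line.strip()]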
Example #6
async def show_gameover(canvas):
    """Show gameover frame in the center of canvas."""
    rows_number, columns_number = canvas.getmaxyx()
    row_size, column_size = get_frame_size(GAMEOVER_FRAME)
    corner_row = rows_number // 2 - row_size // 2
    corner_column = columns_number // 2 - column_size // 2

    while True:
        draw_frame(canvas, corner_row, corner_column, GAMEOVER_FRAME)
        await asyncio.sleep(0)
Example #7
async def explode(canvas, center_row, center_column):
    rows, columns = get_frame_size(EXPLOSION_FRAMES[0])
    corner_row = center_row - rows / 2
    corner_column = center_column - columns / 2

    curses.beep()
    for frame in EXPLOSION_FRAMES:
        draw_frame(canvas, corner_row, corner_column, frame)

        await asyncio.sleep(0)
        draw_frame(canvas, corner_row, corner_column, frame, negative=True)
        await asyncio.sleep(0)
Example #8
async def explode(canvas, center_row, center_column):
    rows, columns = get_frame_size(EXPLOSION_FRAMES[0])
    corner_row = center_row - rows / 2
    corner_column = center_column - columns / 2
    # Subtract 4 columns to get rid of tabs in explosion frames
    corner_column -= 4

    beep()
    for frame in EXPLOSION_FRAMES:
        draw_frame(canvas, corner_row, corner_column, frame)
        await asyncio.sleep(0)
        draw_frame(canvas, corner_row, corner_column, frame, negative=True)
        await asyncio.sleep(0)
Example #9
async def show_obstacles(canvas, obstacles):
    """Display bounding boxes of every obstacle in a list"""

    while True:
        boxes = []

        for obstacle in obstacles:
            boxes.append(obstacle.dump_bounding_box())

        for row, column, frame in boxes:
            draw_frame(canvas, row, column, frame)

        await asyncio.sleep(0)

        for row, column, frame in boxes:
            draw_frame(canvas, row, column, frame, negative=True)
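The Obstacle objects used across the curses examples expose row/column attributes plus has_collision and dump_bounding_box methods. A plausible minimal class inferred from those call sites; the actual implementation in the course materials may differ:

class Obstacle:
    """Rectangular obstacle on the canvas."""

    def __init__(self, row, column, rows_size=1, columns_size=1):
        self.row = row
        self.column = column
        self.rows_size = rows_size
        self.columns_size = columns_size

    def has_collision(self, obj_row, obj_column, obj_rows=1, obj_columns=1):
        """Return True if a rectangle at (obj_row, obj_column) overlaps the obstacle."""
        return (self.row < obj_row + obj_rows
                and obj_row < self.row + self.rows_size
                and self.column < obj_column + obj_columns
                and obj_column < self.column + self.columns_size)

    def dump_bounding_box(self):
        """Return (row, column, frame) describing the bounding box outline."""
        width = self.columns_size + 1
        top = '+' + '-' * (width - 1) + '+'
        side = '|' + ' ' * (width - 1) + '|'
        frame = '\n'.join([top, *[side] * self.rows_size, top])
        return self.row - 1, self.column - 1, frame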
Example #10
    def run(self):
        fitness_trace = []
        try:
            money_num_gen = self.money_num_Gen()
            catch_time = next(money_num_gen)
            # Initialize the population
            pop = self.init_pops()
            for generation in trange(self.max_iter + 1):
                # Compute fitness
                fitnesses = self.calc_fitness(pop)
                fitness_trace.append(max(fitnesses))
                # Save a snapshot of the best individual
                if generation == catch_time:
                    if self.mut_rate > 0.1:
                        self.mut_rate *= 0.9
                    catch_time = next(money_num_gen)
                    idx = argmax(fitnesses)
                    chromos = [Chromo(i) for i in pop[idx]]
                    im = draw_frame(chromos, self.desired_size)
                    with open(self.desired_dir + "{}.png".format(generation),
                              'wb') as fp:
                        im.save(fp)
                # Select the parent generation
                pop = self.selection(fitnesses, pop)
                # Crossover
                pop = self.crossover(pop)
                # Mutation
                pop = self.mutation(pop)

        finally:
            plt.plot(fitness_trace)
            plt.savefig(self.desired_dir + "trace.png")
Example #11
    def calc_fitness(self, pop: ndarray) -> ndarray:
        '''Compute the fitness of every individual in the population.'''
        fitness = empty(len(pop))
        for i, shell in enumerate(pop):
            chromos = [Chromo(v) for v in shell]
            im = draw_frame(chromos, self.desired_size)
            fitness[i] = calc_similarity(self.target_img, im)
        return fitness
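calc_similarity is not included in these snippets. One plausible stand-in, assuming target_img and the rendered frame are equally sized PIL images, scores similarity as negated mean squared error, so that higher is better and the max/argmax logic above still works:

from numpy import asarray


def calc_similarity(target_img, candidate_img):
    """Negated per-pixel MSE between two equally sized images."""
    a = asarray(target_img, dtype=float)
    b = asarray(candidate_img, dtype=float)
    return -((a - b) ** 2).mean()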
Example #12
def main(iou_threshold, confidence_threshold, input_names):
    class_names = load_file(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    model = Yolo_v3(n_classes=n_classes,
                    model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    inputs = tf.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
    detections = model(inputs, training=False)
    saver = tf.train.Saver(tf.global_variables(scope='yolo_v3_model'))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        saver.restore(sess, './weights/model.ckpt')

        win_name = 'Video detection'
        cv2.namedWindow(win_name)
        cap = cv2.VideoCapture(input_names[0])
        frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                      cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'X264')
        fps = cap.get(cv2.CAP_PROP_FPS)
        out = cv2.VideoWriter(
            f'./detections/{input_names[0][7:-4]}_output.mp4', fourcc, fps,
            (int(frame_size[0]), int(frame_size[1])))
        counter = 0
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                resized_frame = cv2.resize(frame,
                                           dsize=_MODEL_SIZE[::-1],
                                           interpolation=cv2.INTER_NEAREST)
                detection_result = sess.run(
                    detections, feed_dict={inputs: [resized_frame]})

                counter = draw_frame(frame, frame_size, detection_result,
                                     class_names, _MODEL_SIZE, counter)

                cv2.imshow(win_name, frame)

                key = cv2.waitKey(1) & 0xFF

                if key == ord('q'):
                    break

                out.write(frame)
        finally:
            cv2.destroyAllWindows()
            cap.release()
            out.release()
            print('Detections have been saved successfully.')
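For completeness, a hypothetical command-line entry point for a main() like the one above; the flag names are illustrative, not taken from the original project:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='YOLOv3 video detection')
    parser.add_argument('inputs', nargs='+', help='input video file path(s)')
    parser.add_argument('--iou-threshold', type=float, default=0.5)
    parser.add_argument('--confidence-threshold', type=float, default=0.5)
    args = parser.parse_args()
    main(args.iou_threshold, args.confidence_threshold, args.inputs)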
Example #13
async def control_rocket(canvas, rocket_frames, rows_number, columns_number):
    """Draw rocket frames, change the rocket's coordinates and fire by keyboard commands."""

    row_position, column_position = rows_number / 2, columns_number / 2
    frame_row_size = utils.get_frame_size(
        max(rocket_frames, key=get_frame_row_size))[0]
    frame_column_size = utils.get_frame_size(
        max(rocket_frames, key=get_frame_column_size))[1]
    limits = {
        'min_row': BORDER_WIDTH,
        'min_column': BORDER_WIDTH,
        'max_row': (rows_number - frame_row_size - BORDER_WIDTH * 2 -
                    DERIVED_WINDOW_HEIGHT),
        'max_column': columns_number - frame_column_size - BORDER_WIDTH,
    }
    row_speed = column_speed = 0
    rocket_animation = (rocket_frames[0], rocket_frames[0], rocket_frames[1],
                        rocket_frames[1])

    for frame in cycle(rocket_animation):
        for obstacle in obstacles:
            if obstacle.has_collision(row_position, column_position,
                                      frame_row_size - ROCKET_FIRE_ROW_SIZE,
                                      frame_column_size):
                coroutines.append(show_gameover(canvas))
                return
        utils.draw_frame(canvas, row_position, column_position, frame)
        row_position, column_position, row_speed, column_speed = \
            handle_control_commands(
                canvas, frame, row_position, column_position,
                limits, row_speed, column_speed
            )
        await asyncio.sleep(0)
        utils.draw_frame(canvas,
                         row_position,
                         column_position,
                         frame,
                         negative=True)
Example #14
async def change_year_data(canvas):
    global year
    max_y, max_x = utils.get_terminal_size()

    while True:
        current_year = year.get('current_year')
        previous_message = utils.get_message(current_year - 1)
        utils.draw_frame(canvas,
                         max_y - 2,
                         2,
                         str(previous_message),
                         negative=True)
        message = utils.get_message(current_year)
        utils.draw_frame(canvas, max_y - 2, 2, str(message))
        if current_year == 1961:
            orbit_with_garbage = fill_orbit_with_garbage(canvas)
            coroutines.append(orbit_with_garbage)
        if current_year == 2020:
            fire_animation = get_fire(canvas)
            coroutines.append(fire_animation)
        await utils.wait_time(CHANGE_YEAR_DELAY)
        year['current_year'] += 1
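utils.wait_time is not shown. In these cooperative loops every await asyncio.sleep(0) is one animation tick, so a plausible helper simply sleeps for the right number of ticks (the 0.1 s tick length is an assumption):

import asyncio


async def wait_time(seconds, tic_timeout=0.1):
    """Suspend the coroutine for roughly `seconds` of game time."""
    for _ in range(int(seconds / tic_timeout)):
        await asyncio.sleep(0)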
Example #15
async def animate_spaceship(canvas, row, column, frames):
    prev_frame = ''
    prev_row, prev_column = (row, column)
    rows_number, columns_number = canvas.getmaxyx()

    while True:
        for frame in frames:
            draw_frame(canvas, prev_row, prev_column, prev_frame, negative=True)
            draw_frame(canvas, row, column, frame)

            prev_frame = frame
            prev_row, prev_column = (row, column)
            frame_rows, frame_columns = get_frame_size(prev_frame)

            rows_direction, columns_direction, space_pressed = read_controls(canvas)

            row += rows_direction
            column += columns_direction

            if row < 0 or row + frame_rows > rows_number:
                row = prev_row
            if column < 0 or column + frame_columns > columns_number:
                column = prev_column
            await asyncio.sleep(0)
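read_controls polls the keyboard without blocking (the canvas is put into nodelay mode elsewhere). A minimal sketch consistent with how the examples use it, arrow keys steer and space fires; anything beyond that is an assumption:

import curses

SPACE_KEY_CODE = 32


def read_controls(canvas):
    """Return (rows_direction, columns_direction, space_pressed)."""
    rows_direction = columns_direction = 0
    space_pressed = False
    while True:
        pressed_key_code = canvas.getch()
        if pressed_key_code == -1:
            break  # keyboard buffer is empty
        if pressed_key_code == curses.KEY_UP:
            rows_direction = -1
        elif pressed_key_code == curses.KEY_DOWN:
            rows_direction = 1
        elif pressed_key_code == curses.KEY_LEFT:
            columns_direction = -1
        elif pressed_key_code == curses.KEY_RIGHT:
            columns_direction = 1
        elif pressed_key_code == SPACE_KEY_CODE:
            space_pressed = True
    return rows_direction, columns_direction, space_pressed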
Example #16
async def run_spaceship(canvas):
    global year
    global spaceship_frame
    global coroutines
    global obstacles
    max_available_row, max_available_column = utils.get_terminal_size()
    row, column = max_available_row - 10, max_available_column / 2
    row_speed = column_speed = 0

    while True:
        current_year = year.get('current_year')
        row_frame, column_frame = get_frame_size(spaceship_frame)

        prev_sprite_row, prev_sprite_column = row, column
        prev_spaceship_frame = spaceship_frame
        canvas.nodelay(True)
        row_pos, column_pos, space = read_controls(canvas)

        row_speed, column_speed = update_speed(row_speed, column_speed,
                                               row_pos, column_pos)
        row += row_pos + row_speed
        column += column_pos + column_speed
        if space and current_year >= 2020:
            # for gun position in the center of the spaceship
            column_for_fire = column + 2
            fire_animation = fire(canvas,
                                  row,
                                  column_for_fire,
                                  rows_speed=FIRE_SPEED)
            coroutines.append(fire_animation)
        row = correct_row(max_available_row, row, row_frame)
        column = correct_column(max_available_column, column, column_frame)
        for obstacle in obstacles:
            if obstacle.has_collision(row, column):
                draw_frame(canvas,
                           prev_sprite_row,
                           prev_sprite_column,
                           prev_spaceship_frame,
                           negative=True)
                coroutines.append(utils.show_gameover(canvas))
                return
        await asyncio.sleep(0)
        draw_frame(canvas,
                   prev_sprite_row,
                   prev_sprite_column,
                   prev_spaceship_frame,
                   negative=True)
        draw_frame(canvas, row, column, spaceship_frame, negative=False)
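correct_row and correct_column are not defined in the snippet. Plausible clamping helpers matching the call sites; the zero lower bound is an assumption:

def correct_row(max_available_row, row, frame_rows):
    """Clamp the frame's top row so the whole frame stays on the canvas."""
    return max(0, min(row, max_available_row - frame_rows))


def correct_column(max_available_column, column, frame_columns):
    """Clamp the frame's left column so the whole frame stays on the canvas."""
    return max(0, min(column, max_available_column - frame_columns))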
Example #17
def main(type,
         input_names,
         save_folder='./detections',
         iou_threshold=0.5,
         confidence_threshold=0.5,
         class_names_file=_CLASS_NAMES_FILE,
         create_csv=False):
    # Get class names and number
    class_names = load_class_names(class_names_file)
    n_classes = len(class_names)

    # Tensorflow prep
    tf.compat.v1.reset_default_graph()

    # Load Yolo_v3 model
    model = Yolo_v3(n_classes=n_classes,
                    model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    if type == 'images':

        # Load pictures and set up detection inputs
        batch_size = len(input_names)
        batch = load_images(input_names, model_size=_MODEL_SIZE)
        inputs = tf.compat.v1.placeholder(tf.float32,
                                          [batch_size, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)

        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        # Load the weights model.ckpt and run detection on inputs
        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')
            detection_result = sess.run(detections, feed_dict={inputs: batch})

        # Using detection results, draw detection boxes on input pictures and save them
        draw_boxes(input_names, detection_result, class_names, _MODEL_SIZE,
                   save_folder)

        print('Detections have been saved successfully.')

    elif type == 'video':

        # Set yolo_v3 to tensorflow
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        # Run tensorflow session
        with tf.compat.v1.Session() as sess:
            # Load model
            saver.restore(sess, './weights/model.ckpt')

            # Create window for output video
            win_name = 'Video detection'
            cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(win_name, 1280, 720)

            # Create OpenCV capture and get video metadata
            cap = cv2.VideoCapture(input_names[0])
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)

            # Set name and save destination for output video
            input_name_base = os.path.basename(input_names[0])
            base_name, extension = os.path.splitext(input_name_base)
            video_save_path = save_folder + '/' + base_name + '_analysed' + extension
            if extension in _TO_MP4_FORMAT_LIST:
                video_save_path = save_folder + '/' + base_name + '_analysed.mp4'

            # Create output video
            out = cv2.VideoWriter(video_save_path, fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            # Create csv file and insert row of time, frame, and classes if create_csv is set to True
            if create_csv:
                csv_save_path = save_folder + '/' + os.path.splitext(
                    input_name_base)[0] + '_statistics.csv'
                csv_field_names = class_names[:]
                csv_field_names.insert(0, "time")
                csv_field_names.insert(0, "frame")
                sec_counter = 0
                with open(csv_save_path, 'w', newline='') as csv_file:
                    csv_writer = csv.writer(csv_file)
                    csv_file.write("sep=,")
                    csv_file.write('\n')
                    csv_writer.writerow(csv_field_names)
                csv_input_dict = {"frame": cap.get(cv2.CAP_PROP_POS_FRAMES)}
            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break

                    # Resize frame to fit model and run detection
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})
                    # At one-second intervals, write the maximum number of detections per class to a new csv row
                    if create_csv:

                        csv_input_dict["frame"] = cap.get(
                            cv2.CAP_PROP_POS_FRAMES)
                        csv_input_dict["time"] = cap.get(cv2.CAP_PROP_POS_MSEC)
                        for cls in range(len(class_names)):
                            number_of_obj = len(detection_result[0][cls])
                            if number_of_obj != 0:
                                if class_names[cls] in csv_input_dict:
                                    csv_input_dict[class_names[cls]] = max(
                                        number_of_obj,
                                        csv_input_dict[class_names[cls]])
                                else:
                                    csv_input_dict[
                                        class_names[cls]] = number_of_obj
                        if cap.get(
                                cv2.CAP_PROP_POS_MSEC) / 1000 >= sec_counter:
                            with open(csv_save_path, 'a',
                                      newline='') as csv_file:
                                csv_writer = csv.DictWriter(
                                    csv_file, fieldnames=csv_field_names)
                                csv_writer.writerow(csv_input_dict)
                            sec_counter += 1
                            for cls in range(len(class_names)):
                                csv_input_dict.pop(class_names[cls], None)

                    # Draw detection boxes on the frame being handled
                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    # Show the current output frame on window
                    cv2.imshow(win_name, frame)

                    # Poll for key inputs, if 'q' is pressed, break to end processing video
                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    # Write the current frame to the output file
                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    # Not in use currently
    elif type == 'webcam':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Webcam detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(0)
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    else:
        raise ValueError(
            "Inappropriate data type. Please choose either 'video' or 'images'."
        )
Example #18
def main(type, iou_threshold, confidence_threshold, venue, input_names):
    class_names = load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    model = Yolo_v3(n_classes=n_classes,
                    model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    if type == 'images':
        batch_size = len(input_names)
        batch = load_images(input_names, model_size=_MODEL_SIZE)
        inputs = tf.compat.v1.placeholder(tf.float32,
                                          [batch_size, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')
            detection_result = sess.run(detections, feed_dict={inputs: batch})
        res = draw_boxes(input_names, detection_result, class_names,
                         _MODEL_SIZE)
        filtered_cars = [
            x for x in res if x.get("car") and x.get("car") > 60.0
        ]
        filtered_persons = [
            x for x in res if x.get("person") and x.get("person") > 60.0
        ]
        print('Detections have been saved successfully.')

        print(f'Number of persons: {len(filtered_persons)}')
        print(f'Number of cars: {len(filtered_cars)}')
        put_res_on_queue({
            "nmr_of_cars": len(filtered_cars),
            "nmr_of_persons": len(filtered_persons),
            "time_generated": int(datetime.now().timestamp()),
            "venue": venue
        })

    elif type == 'video':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Video detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(input_names[0])
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    elif type == 'webcam':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Webcam detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(0)
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    else:
        raise ValueError(
            "Inappropriate data type. Please choose either 'video' or 'images'."
        )
Example #19
def main(type, iou_threshold, confidence_threshold, input_names):
    global detection_result
    class_names = load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    model = Yolo_v3(n_classes=n_classes,
                    model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    if type == 'images':
        batch = load_images(input_names, model_size=_MODEL_SIZE)
        inputs = tf.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.train.Saver(tf.global_variables(scope='yolo_v3_model'))

        with tf.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')
            detection_result = sess.run(detections, feed_dict={inputs: batch})

        draw_boxes(input_names, detection_result, class_names, _MODEL_SIZE)

        print('Detections have been saved successfully.')

    elif type == 'video':
        inputs = tf.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.train.Saver(tf.global_variables(scope='yolo_v3_model'))

        with tf.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Video detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(input_names[0])
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    else:
        raise ValueError(
            "Inappropriate data type. Please choose either 'video' or 'images'."
        )
Example #20
def main(type, iou_threshold, confidence_threshold, cat, temp_input):
    class_names = load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)
    input_names = []

    model = Yolo_v3(n_classes=n_classes,
                    model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    if type == 'images':
        batch_size = len(temp_input)

        if batch_size == 1 and temp_input[0].endswith('.txt'):
            # Read whitespace-separated image paths from the text file
            with open(temp_input[0], 'r') as file1:
                for line in file1:
                    input_names.extend(line.split())
            batch_size = len(input_names)
        else:
            input_names = temp_input

        batch = load_images(input_names, model_size=_MODEL_SIZE)
        inputs = tf.compat.v1.placeholder(tf.float32,
                                          [batch_size, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')
            detection_result = sess.run(detections, feed_dict={inputs: batch})

        print('detection result is out')

        draw_boxes(input_names, detection_result, class_names, _MODEL_SIZE)

        save_detections(cat, input_names, detection_result, class_names)

        print('Detections have been saved successfully.')

    elif type == 'video':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Video detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(input_names[0])
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    elif type == 'webcam':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Webcam detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(0)
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    else:
        raise ValueError(
            "Inappropriate data type. Please choose either 'video' or 'images'."
        )
Example #21
def main(type, iou_threshold, confidence_threshold, input_file):
    """Detect the number of people and chairs in the meeting hall from video or webcam input.

    Parameters
    ----------
    type : str
        Defines the type of input: video file or webcam.
    iou_threshold : float
        Maximum Intersection over Union allowed, in the range (0, 1).
    confidence_threshold : float
        Minimum likelihood that is accepted, in the range (0, 1).
    input_file : str
        Location of the input video file in case of video type.

    Returns
    -------
        None.
    """
    # Create a database named "detection" in InfluxDB
    client = InfluxDBClient(host="localhost", port=8086)
    client.create_database("detection")
    client.switch_database("detection")
    print("Created database with the name detection")

    # Load the model with the COCO dataset class names
    class_names = load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)
    model = Yolo_v3(n_classes=n_classes, model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)
    print("Loading the model was successful.")

    if type == 'video':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(scope='yolo_v3_model'))
        print("Initiating detection on sample video.")
        
        with tf.compat.v1.Session() as sess:
            # Load the pre-trained weights
            saver.restore(sess, './weights/model.ckpt')
            win_name = 'Video detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(input_file)
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'MP4V')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./output/sample_output.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))
            try:
                # Read the video, counting chairs and people in each frame
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame, dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(detections,
                                                feed_dict={inputs: [resized_frame]})
                    chair_count, person_count, ratio = draw_frame(
                        frame, frame_size, detection_result, class_names,
                        _MODEL_SIZE)
                    json_body = [{
                        "measurement": "meetingroom",
                        "fields": {
                            "Chair": chair_count,
                            "People": person_count,
                            "Ratio": ratio,
                        },
                    }]
                    client.write_points(json_body)
                    cv2.imshow(win_name, frame)
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q'):
                        break
                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    elif type == 'webcam':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(scope='yolo_v3_model'))
        print("Initiating detection on sample Webcam.")

        with tf.compat.v1.Session() as sess:
            # Load the pre-trained weights
            saver.restore(sess, './weights/model.ckpt')
            win_name = 'Webcam detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(0)
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'MP4V')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./output/sample_output.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                # Read the webcam stream, counting chairs and people in each frame
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame, dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(detections,
                                                feed_dict={inputs: [resized_frame]})

                    chair_count, person_count, ratio = draw_frame(
                        frame, frame_size, detection_result, class_names,
                        _MODEL_SIZE)
                    json_body = [{
                        "measurement": "meetingroom",
                        "fields": {
                            "Chair": chair_count,
                            "People": person_count,
                            "Ratio": ratio,
                        },
                    }]
                    client.write_points(json_body)
                    cv2.imshow(win_name, frame)
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q'):
                        break
                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                out.release()
                print('Detections have been saved successfully.')

    else:
        raise ValueError("Inappropriate data type. Please choose either 'video' or 'webcam'")
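A short, hypothetical way to check that the points written above actually landed in InfluxDB, using the same influxdb client; the database, measurement and field names match the example, the rest is illustrative:

from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='detection')
result = client.query('SELECT "Chair", "People", "Ratio" FROM "meetingroom" LIMIT 5')
for point in result.get_points(measurement='meetingroom'):
    print(point)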