import numpy as np


def get_events_from_vid(vid, initial_frame_number):
    # Build events from each corner group, skipping groups whose
    # third field is None (no detection for that group)
    events = [get_events_from_corner_group(corner_group, vid)
              for corner_group in get_corner_groups(vid)
              if corner_group[2] is not None]
    events = flatten(events)
    # Shift chunk-local frame numbers to absolute positions in the video
    events = update_frame_numbers(events, initial_frame_number)
    return events
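get_corner_groups, get_events_from_corner_group, and update_frame_numbers come from elsewhere in this project and are not shown. A minimal sketch of update_frame_numbers, assuming events are tuples that begin with chunk-local start and end frame indices (an assumption, not the project's actual code):

def update_frame_numbers(events, initial_frame_number):
    # Shift each event's start/end from chunk-local to absolute frames
    return [(start + initial_frame_number, end + initial_frame_number, *rest)
            for (start, end, *rest) in events]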
def apply_piece_predictions_to_events(events, predictor, batch_size=1000):
    # Predict in fixed-size batches to bound memory use
    batches = chunk(events, batch_size)

    def process(batch):
        length = len(batch)
        # Stack the 32x32 square images (event field 4) along the last axis
        x = np.empty((32, 32, length))
        for i in range(length):
            x[:, :, i] = batch[i][4]
        # Scale pixel values from 0-255 to 0-1 for the predictor
        x = x / 255
        # Swap each event's image for the predicted piece and its certainty:
        # (start, end, file, rank, image) -> (start, end, file, rank, piece, certainty)
        return [(start, end, file, rank, piece, certainty)
                for ((start, end, file, rank, _), (piece, certainty))
                in zip(batch, predictor.get_predictions(x))]

    return flatten([process(batch) for batch in batches])
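The chunk and flatten helpers used above are also project utilities. Minimal sketches consistent with how they are called here (assumptions, not the project's actual implementations):

def chunk(items, size):
    # Split a list into consecutive sublists of at most `size` items
    return [items[i:i + size] for i in range(0, len(items), size)]


def flatten(nested):
    # Remove one level of nesting: [[a, b], [c]] -> [a, b, c]
    return [item for sub in nested for item in sub]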
Example #3
import sqlite3

import pandas as pd
from flask import render_template, session

import helper_functions


def select():
    # Connect to database
    conn = sqlite3.connect('./db/incarceration.db')

    # Query the database for the list of distinct states
    state_data = pd.read_sql_query("""SELECT DISTINCT state
                                   FROM incarceration;
                                   """, conn)

    # Collapse the one-column result into a plain list
    states = helper_functions.flatten(state_data)

    conn.close()

    # Cache the list in the session for later requests
    session['states'] = states

    return render_template('select.html', states=states)
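helper_functions.flatten is this project's own utility; given that it receives a one-column DataFrame and its result ends up in the session, a plausible sketch (an assumption, not the actual code) is:

def flatten(df):
    # Collapse a one-column DataFrame into a plain Python list
    return df.iloc[:, 0].tolist()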
Example #4
def show_state(state_name):
    # Connect to database
    conn = sqlite3.connect('./db/incarceration.db')

    # Query the database with a bound parameter instead of interpolating
    # state_name into the SQL, which was vulnerable to SQL injection
    county_data = pd.read_sql_query("""SELECT DISTINCT county_name
                                    FROM incarceration
                                    WHERE state = ?;
                                    """, conn, params=(state_name,))

    counties = helper_functions.flatten(county_data)

    conn.close()

    session['counties'] = counties

    return render_template('select.html', state_name=state_name,
                           counties=counties, states=session.get('states'))
from itertools import groupby


def concatenate_identical_predictions(events):

    def compress(e1, e2):
        # Merge a run into a single span: e1's start, e2's end, and the
        # shared remaining fields (file, rank, piece_code, certainty)
        return (e1[0],
                e2[1],
                *e1[2:])

    def is_duplicate(e1, e2):
        # Pair up corresponding fields of the two events,
        # e.g. start == (e1_start, e2_start)
        start, end, file, rank, piece_code, certainty = zip(e1, e2)

        both_equal = lambda p: p[0] == p[1]
        # Same square, same piece, and e1 ends no later than e2 starts
        return both_equal(file) \
               and both_equal(rank) \
               and both_equal(piece_code) \
               and end[0] <= start[1]

    def dedup(events):
        deduped = []
        i = 0
        events = list(events)
        length = len(events)
        while i < length:
            # Grow the run while consecutive events are duplicates,
            # then emit either the merged span or the lone event
            j = 1
            while i + j < length and is_duplicate(events[i], events[i+j]):
                j += 1
            if j > 1:
                deduped.append(compress(events[i], events[i+j-1]))
            else:
                deduped.append(events[i])
            i += j

        return deduped

    # Sort by square (file, rank), then by time, so identical consecutive
    # predictions for the same square become adjacent, and dedup per square
    sorted_events = sorted(events, key=lambda e: (e[2], e[3], e[0], e[1]))
    groups = groupby(sorted_events, key=lambda e: (e[2], e[3]))

    return flatten([dedup(group) for key, group in groups])
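A hypothetical usage example (the piece code and certainties are made-up values): two back-to-back detections of the same piece on the same square merge into one span, while the event on another square is left alone.

events = [(0, 10, 4, 1, 1, 0.99),   # piece 1 on square (4, 1), frames 0-10
          (10, 25, 4, 1, 1, 0.97),  # same piece, same square, frames 10-25
          (0, 25, 4, 2, 1, 0.95)]   # different square (4, 2)
print(concatenate_identical_predictions(events))
# [(0, 25, 4, 1, 1, 0.99), (0, 25, 4, 2, 1, 0.95)]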
from functools import partial
from itertools import count, groupby

import numpy as np


def main(args):
    # Initialize the predictor first; loading the model takes a while,
    # but it is only needed once
    predictor = ChessboardPredictor()

    processing_time = start_time("Processing")
    time = start_time("Event creation")
    events = []
    video_container = VideoContainer(args.filepath)
    processed = 0
    start_at = 0
    end_at = video_container.length
    # Pull the video in overlapping fixed-size chunks and turn each chunk
    # into per-square events
    for vid, initial_frame in video_container.get_video_array(start_at=start_at,
                                                              end_at=end_at,
                                                              chunk_size=1000,
                                                              overlap=1):
        events.append(get_events_from_vid(vid, initial_frame))
        processed += len(vid)
        print("batch processed")

    events = flatten(events)
    print("number of events:", len(events))
    end_time("Event creation", time)

    time = start_time("Predictions")
    events = apply_piece_predictions_to_events(events, predictor)
    end_time("Predictions", time)

    events = concatenate_identical_predictions(events)
    print("number of events after dedup:", len(events))
    print("processed:", processed)
    # Paint each event's piece code onto its square for every frame it spans;
    # 20 is the fill value for squares with no prediction
    board_arrays = np.full((processed, 8, 8), 20, dtype='uint8')
    for start, end, file, rank, piece_code, certainty in events:
        board_arrays[start - start_at:end - start_at + 1, file, rank] = piece_code

    # Collapse runs of identical consecutive FENs to one entry per run
    deduped = [x[0] for x in groupby(board_arrays_to_fens(board_arrays))]

    print(deduped)
    exit()  # execution stops here; the video-writing code below is unreachable
    time = start_time("Write Video")
    # board_arrays_to_mp4(board_arrays)
    # show_together(args.filepath, board_arrays, 550)

    # Render an SVG board for every frame and overlay it on the source video
    svgs = board_arrays_to_svgs(board_arrays)
    rendered_svgs = render_svgs(svgs)

    video_stream = VideoContainer(args.filepath).stream(start_at)

    out_stream = overlay_video(video_stream, rendered_svgs, (0, 0))
    # Stamp a running frame number onto each frame
    out_stream = streamify(partial(apply_number, loc=(20, 300)), out_stream, count())

    print_stream('file.avi', out_stream, view_while_writing=True)

    end_time("Write Video", time)

    np.set_printoptions(edgeitems=4)

    end_time('Processing', processing_time)

    predictor.close()
    exit()

    # Leftover from an earlier version that collected rows into `table`;
    # unreachable after the exit() above, and `table` is never defined here.
    # from itertools import groupby
    # print('l tab bf ', len(table))
    # table = [list(g)[0] for k, g in groupby(table, lambda x: x[0])]
    # print('l tab af ', len(table))
    #
    # with open('fens.csv', 'w') as fenCsvFile:
    #     writer = csv.writer(fenCsvFile, dialect='excel')
    #     writer.writerows(table)
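The FEN deduplication step in main() relies on an itertools.groupby idiom worth calling out: with no key function, groupby groups runs of equal adjacent items, and keeping each run's key drops consecutive repeats. A standalone demonstration:

from itertools import groupby

fens = ['8/8/8/8/8/8/8/8',
        '8/8/8/8/8/8/8/8',
        'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR']
print([key for key, _ in groupby(fens)])
# ['8/8/8/8/8/8/8/8', 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR']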