def build_knn():
    target_file_path_list = ['true.txt', 'false.txt']
    X = []
    Y = []
    for target_file_path in target_file_path_list:
        target_file_list = get_label_file_index(target_file_path)
        # print(target_file_list)
        for label_file in target_file_list:
            file_path = os.path.join(DATA_DIR, label_file)
            if not os.path.exists(file_path):
                download_file(get_video_url(label_file), file_path)

            for frame in open_video(file_path):
                # print(frame)
                # Convert to grayscale but keep three identical channels so the
                # frame still matches the encoder's expected input shape
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                img2 = np.zeros_like(frame)
                img2[:, :, 0] = gray
                img2[:, :, 1] = gray
                img2[:, :, 2] = gray
                frame = img2
                embed = Encoder.predict(np.array([frame]))[0]
                X.append(embed)
                label = target_file_path.split('.')[0]
                Y.append(label)
                # cv2.imwrite(os.path.join(LABEL_FRAME_DIR, label_file+'.png'), frame)
                # cv2.imshow('Frame_focus', frame)
                # only the first frame of each labelled clip is used for fitting
                break
    knn = KNeighborsClassifier(metric='cosine', n_neighbors=5)
    knn.fit(X, Y)
    return knn
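A quick, hypothetical way to sanity-check the classifier returned by `build_knn()` is to embed a single frame from another clip and ask for a prediction. The clip name below is a placeholder, and `open_video` is assumed to yield BGR frames exactly as in the training loop above:

```python
# Hypothetical sanity check; 'example_clip.mp4' is a placeholder name.
knn = build_knn()
frame = next(iter(open_video(os.path.join(DATA_DIR, 'example_clip.mp4'))))
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.merge([gray, gray, gray])  # same 3-channel grayscale trick as above
embedding = Encoder.predict(np.array([frame]))[0]
print(knn.predict([embedding])[0])  # -> 'true' or 'false'
```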
Example #2
parser.add_argument('--skip_checksum',
                    action='store_true',
                    help='Skip checksum')
parser.add_argument('--skip_extract',
                    action='store_true',
                    help='Skip extracting files')
args = parser.parse_args()
args.e = args.e or args.dest

df = pd.read_csv(args.csv, delimiter=',')

if not args.skip_download:
    for url in df.url:
        fname = url.split('/')[-1]
        print("Downloading %s:" % fname)
        download_file(url=url, dest_folder=args.dest, fname=fname)
else:
    print("Skipping file download")

if not args.skip_checksum:
    for index, row in df.iterrows():
        url = row['url']
        md5 = row['md5']
        fname = url.split('/')[-1]
        fpath = os.path.join(args.dest, fname)
        print("Verifing %s: " % fname, end='')
        ret = md5_checksum(fpath=fpath, target_hash=md5)
        if not ret:
            raise ValueError(f"Checksum for {fname} failed!")
        else:
            print(f"Checksum correct for {fname}")
Example #3
def show_valid_example(val_img_fns, example_idx=0):
    zf = zipfile.ZipFile("val2014_sample.zip")
    all_files = set(val_img_fns)
    found_files = list(filter(lambda x: x.filename.rsplit("/")[-1] in all_files, zf.filelist))
    example = found_files[example_idx]
    apply_model_to_image_raw_bytes(zf.read(example))


show_valid_example(val_img_fns, example_idx=100)

# sample more images from validation
for idx in np.random.choice(
        range(len(zipfile.ZipFile("val2014_sample.zip").filelist) - 1), 10):
    show_valid_example(val_img_fns, example_idx=idx)
    time.sleep(1)
"""You can download any image from the Internet and appply your model to it!"""

download_utils.download_file(
    "http://www.bijouxandbits.com/wp-content/uploads/2016/06/portal-cake-10.jpg",
    "portal-cake-10.jpg")

apply_model_to_image_raw_bytes(open("portal-cake-10.jpg", "rb").read())
"""Now it's time to find 10 examples where your model works good and 10 examples where it fails! 
You can use images from validation set as follows:
```python
show_valid_example(val_img_fns, example_idx=...)
```
You can use images from the Internet as follows:
```python
! wget ...
apply_model_to_image_raw_bytes(open("...", "rb").read())
```
If you use these functions, the output will be embedded into your notebook and will be visible during peer review!
When you're done, download your notebook using "File" -> "Download as" -> "Notebook" and prepare that file for peer review!
Example #4
def get_frames():
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (1, 30)
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    # download_all_videos()
    data_folder = os.path.join(PROJECT_ROOT, 'data')
    file_list = get_index_file(step_size=50, shuffle=True)
    class_1_q = [0] * 200
    class_2_q = [0] * 200
    prev_frame_embedding = None
    knn = build_knn()
    for file_name in file_list:
        # file_name = this_file_url.split('/')[-1]
        file_path = os.path.join(data_folder, file_name)
        # continue
        if not os.path.exists(file_path):
            download_file(get_video_url(file_name), file_path)

        cap = cv2.VideoCapture(file_path)
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")
        frame_count = 0
        fps = 24
        # Read until video is completed
        while cap.isOpened():
            # Capture frame-by-frame
            ret, frame = cap.read()
            counter = 0
            frame_count += 1
            if frame_count % fps != 0:
                continue
            if ret:
                frame = frame.astype('float32') / 255
                # Convert to grayscale but keep three identical channels so the
                # frame still matches the encoder's expected input shape
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                img2 = np.zeros_like(frame)
                img2[:, :, 0] = gray
                img2[:, :, 1] = gray
                img2[:, :, 2] = gray
                frame = img2
                h, w = FRAME_SIZE
                # y_sample_idx = np.random.randint(0, frame.shape[0] - h, sample_batch_size)
                # x_sample_idx = np.random.randint(0, frame.shape[1] - w, sample_batch_size)
                # unioned = zip(y_sample_idx, x_sample_idx)
                # for coor in unioned:
                #     y, x = coor
                x, y = 180, 180
                cropped_frame = frame[y:y + h, x:x + w]
                embedding = Encoder.predict(np.array([cropped_frame]))[0]
                pred = knn.predict([embedding])[0].split('_')[0]
                prob = knn.predict_proba([embedding])[0]
                print(file_name, pred)
                recon = EncoderDecoder.predict(np.array([cropped_frame]))[0]
                counter += 1
                if prev_frame_embedding is not None:

                    class_1_q.pop(0)
                    class_2_q.pop(0)
                    class_1_q.append(prob[0])
                    class_2_q.append(prob[1])
                    if cv2.waitKey(25) & 0xFF == ord('q'):
                        break
                    cv2.putText(
                        cropped_frame,
                        pred,
                        bottomLeftCornerOfText,
                        font,
                        fontScale,
                        fontColor,
                        lineType,
                    )
                    fig = plt.figure()
                    plot = fig.add_subplot(111)
                    plot.plot(class_1_q, )
                    plot.plot(class_2_q, )
                    fig.canvas.draw()

                    # Now we can save it to a numpy array.
                    data = np.frombuffer(fig.canvas.tostring_rgb(),
                                         dtype=np.uint8)
                    data = data.reshape(fig.canvas.get_width_height()[::-1] +
                                        (3, ))
                    plt.cla()
                    plt.clf()
                    plt.close()
                    # matplotlib renders RGB, but cv2.imshow expects BGR
                    cv2.imshow('ts', cv2.cvtColor(data, cv2.COLOR_RGB2BGR))
                    cv2.imshow('Live', cropped_frame)
                    cv2.imshow('Reconstruction', recon)
                if prev_frame_embedding is None:
                    # set first frame as reference
                    prev_frame_embedding = embedding

            # Break the loop once the video ends
            else:
                break
        cap.release()
    cv2.destroyAllWindows()
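The grayscale-but-three-channel conversion is duplicated between `build_knn()` and `get_frames()`. A small helper, sketched here under the assumption that frames are H×W×3 BGR arrays, would remove that duplication:

```python
def to_three_channel_gray(frame):
    # Convert to grayscale, then replicate the channel so models trained on
    # 3-channel input still accept the frame.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.merge([gray, gray, gray])
```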
Example #5
    data_folder = os.path.join(PROJECT_ROOT, 'data')
    file_list = get_index_file(step_size=10)
    encoder_path = os.path.join(PROJECT_ROOT, 'models', encoder_name)
    model_path = os.path.join(PROJECT_ROOT, 'models', model_name)

    noise_factor = 0.3

    if os.path.exists(model_path):
        autoencoder = load_model(model_path)
    for this_file_url in file_list:
        print(this_file_url)
        file_name = this_file_url.split('/')[-1]
        file_path = os.path.join(data_folder, file_name)
        # continue
        if not os.path.exists(file_path):
            download_file(get_video_url(this_file_url), file_path)

        cap = cv2.VideoCapture(file_path)
        # Check if camera opened successfully
        if not cap.isOpened():
            print("Error opening video stream or file")
        buf = []
        noise_buf = []
        frame_count = 0
        fps = 4
        # Read until video is completed
        while cap.isOpened():
            # Capture frame-by-frame
            ret, frame = cap.read()
            counter = 0
            frame_count += 1
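The excerpt is cut off before `buf`, `noise_buf` and `noise_factor` are used. A common denoising-autoencoder pattern, shown here purely as an assumed continuation (presumably inside a check that the frame was read successfully, with frames scaled to [0, 1]):

```python
# Assumed continuation: pair each clean frame with a noisy copy.
frame = frame.astype('float32') / 255
noisy = frame + noise_factor * np.random.normal(size=frame.shape)
noisy = np.clip(noisy, 0.0, 1.0)
buf.append(frame)
noise_buf.append(noisy)
```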
DATASET_PREPROCESS_FN = {
    "ag": preprocess_ag.download_and_preprocess_ag,
    "imdb": preprocess_imdb.download_and_preprocess_imdb,
    "mnli": preprocess_mnli.download_and_preprocess_mnli,
    "mr": preprocess_mr.download_and_preprocess_mr,
    "snli": preprocess_snli.download_and_preprocess_snli,
    "yelp": preprocess_yelp.download_and_preprocess_yelp
}

if __name__ == "__main__":
    FLAGS = parser.parse_args()

    if FLAGS.process_raw == "1":
        for name, processing_func in DATASET_PREPROCESS_FN.items():
            logger.info("Start download and process %s.", name)
            processing_func()

    else:
        download_file(subdir="", **downloadable_dataset_urls["processed-datasets"])

    if FLAGS.verify == "1":
        root_dir = get_root_dir()
        datasets_dir = os.path.join(root_dir, "datasets")
        dataset_json_list = sorted(glob.glob(datasets_dir + "/*/*.json"))
        for json_filename in dataset_json_list:
            logger.info("Verify %s.", json_filename)
            with open(json_filename) as f:
                data = json.load(f)

            verify_dataset(data)
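`verify_dataset` is not shown in this excerpt. A minimal placeholder sketch, assuming only that each processed dataset JSON is an object carrying a `"data"` list of labelled records; the project's real checks are likely stricter:

```python
def verify_dataset(data):
    # Hypothetical sketch; the actual verification logic may check more fields.
    assert isinstance(data, dict), "dataset JSON should be an object"
    records = data.get("data", [])
    assert len(records) > 0, "dataset contains no records"
    assert all("label" in record for record in records), "a record is missing its label"
```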