Example #1
    # blend back-warped line plotting with the undistorted original image
    img_blended = cv2.addWeighted(img_undist, 0.7, img_warpback, 1.0, 0)
    
    # put text on image
    put_text_on_img(img_blended, (leftl.curvature+rightl.curvature)/2000, \
        measure_deviation_left(leftl.dist2line, rightl.dist2line))

    return img_blended

# create line tracer
tracer = LineTracer()

#in_clip = VideoFileClip("project_video.mp4").subclip(0,2)
in_clip = VideoFileClip("project_video.mp4")
#in_clip = VideoFileClip("challenge_video.mp4")
#out_clip = in_clip.fl_image(process_image) #NOTE: this function expects color images!!
new_frames = []
for frame in in_clip.iter_frames():
    new_frame = process_image(frame, tracer.getLast())
    new_frames.append(new_frame)
out_clip = ImageSequenceClip(new_frames, fps = 25)
out_clip.write_videofile('output_images/project_video.mp4')
#ipython_display(out_clip)
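
The commented-out fl_image line above is moviepy's streaming alternative to buffering every processed frame in a list. A minimal sketch, assuming process_image keeps the signature used in the loop; the wrapper name below is illustrative:

# Hypothetical streaming variant: fl_image maps a function over frames lazily,
# so the whole video never has to sit in memory at once.
def process_with_tracer(frame):
    # fl_image passes only the frame, so the tracer is captured in a closure
    return process_image(frame, tracer.getLast())

out_clip = in_clip.fl_image(process_with_tracer)
out_clip.write_videofile('output_images/project_video.mp4', audio=False)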



#%%


# Assemble pictures in a folder, write to a videofile and gif
from moviepy.editor import ImageSequenceClip
clip = ImageSequenceClip("video_output/output", fps=24)
clip.to_videofile("video_output/output/output.mp4",
                  fps=24)  # many options available

#clip.to_gif("video_output/output/mygif.gif", fps=10) # many options available
Example #3
def make_video(arrays, episode, out_dir, mode):
    filename = os.path.join(out_dir, f"{mode}.{episode}.mp4")
    # array = np.stack(arrays, axis=0)
    clip = ImageSequenceClip(arrays, fps=1)
    print(filename)
    clip.write_videofile(filename)
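
A hypothetical call, assuming arrays is a list of HxWx3 uint8 frames collected during a rollout (the names below are illustrative):

# frames: list of np.uint8 arrays of shape (H, W, 3)
make_video(frames, episode=3, out_dir='videos', mode='eval')
# -> writes videos/eval.3.mp4 at 1 fps
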
def imagearray2file(img_array, outpath=None, fps=30):
    '''
    :param img_array: numpy array of shape RxCxTxwidthxheightx3
    :param outpath: output path; behavior depends on the file extension (see below)
    :param fps: frames per second of the gif/video output
    :return:
        an image list of length T
        if outpath is given as a png file, an image will be saved for each t in T.
        if outpath is given as a gif file, an animated image with T frames will be created.
        if outpath is given as an avi or mp4 file, a video with T frames will be written.
    '''
    import cv2
    from human_body_prior.tools.omni_tools import makepath

    if outpath is not None:
        makepath(outpath, isfile=True)

    if not isinstance(img_array, np.ndarray) or img_array.ndim != 6:
        raise ValueError('img_array should be a numpy array of shape RxCxTxwidthxheightx3')

    R, C, T, img_h, img_w, img_c = img_array.shape

    out_images = []
    for tIdx in range(T):
        row_images = []
        for rIdx in range(R):
            col_images = []
            for cIdx in range(C):
                col_images.append(img_array[rIdx, cIdx, tIdx])
            row_images.append(np.hstack(col_images))
        t_image = np.vstack(row_images)
        out_images.append(t_image)

    if outpath is not None:
        if '.png' in outpath:
            for tIdx in range(T):
                if T > 1:
                    cur_outpath = outpath.replace('.png', '_%03d.png'%tIdx)
                else:
                    cur_outpath = outpath
                cv2.imwrite(cur_outpath, out_images[tIdx])
                while not os.path.exists(cur_outpath): continue  # wait until the snapshot is written to the disk
        elif '.gif' in outpath:
            import imageio
            with imageio.get_writer(outpath, mode='I', fps = fps) as writer:
                for tIdx in range(T):
                    img = out_images[tIdx].astype(np.uint8)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    writer.append_data(img)
        elif '.avi' in outpath:
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            video = cv2.VideoWriter(outpath, fourcc, fps, (img_w, img_h), True)
            for tIdx in range(T):
                img = out_images[tIdx].astype(np.uint8)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                video.write(img)

            video.release()
            cv2.destroyAllWindows()
        elif '.mp4' in outpath:

            from moviepy.editor import ImageSequenceClip
            animation = ImageSequenceClip(out_images, fps=fps)
            animation.write_videofile(outpath)

    return out_images
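
A minimal usage sketch, not from the source, assuming the human_body_prior dependency is available: a 2x2 grid of 10 random frames written as an animated gif (the output path is illustrative):

import numpy as np

# R=2, C=2 views, T=10 frames of 64x64 RGB
demo = (np.random.rand(2, 2, 10, 64, 64, 3) * 255).astype(np.uint8)
stitched = imagearray2file(demo, outpath='demo.gif', fps=10)
assert len(stitched) == 10  # one stitched image per time step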
Example #5
def process_imgs(srcdir, dstdir):
    raw_imgs = sorted(glob.glob(srcdir + '/*.jpg'))

    if not os.path.exists(dstdir):
        os.makedirs(dstdir)

    # number of historical fits to store
    smoothing_factor = 12

    # number of consecutive drops required to trigger a new window search
    window_reset_threshold = 3

    # coefficient-change threshold for rejecting a fit
    linear_coeff_threshold = 0.3

    # Instantiate line object for left and right line
    left_line = Line()
    right_line = Line()

    for i in tqdm(range(0, len(raw_imgs))):
        # Read in frame
        image = plt.imread(raw_imgs[i])

        # Apply distortion correction
        undist = cv2.undistort(image, mtx, dist, None, mtx)

        # Apply color space transform and thresholding
        combined_binary = mask_color(undist)

        # Perspective transform to aerial view
        warped = transform_aerial_view(undist)
        binary_warped = transform_aerial_view(combined_binary)

        # Lane Detection and fitting
        left_line.current_fit, right_line.current_fit, left_line.allxy, right_line.allxy = detect_and_fit_lane(
            binary_warped, left_line.best_fit, right_line.best_fit)

        # Sanity Check
        #--------------
        # Compute the difference between the current fit coeff and the best fit coeff
        if i > 0:
            left_line.diffs = left_line.current_fit - left_line.best_fit
            right_line.diffs = right_line.current_fit - right_line.best_fit

        # Drop the current fit and fall back to the previous fit coeff.
        # if the linear coeff. change exceeds the threshold;
        # otherwise the current fit is good to go.
        # The number of consecutive drops is recorded.
        if np.absolute(left_line.diffs[1]) > linear_coeff_threshold:
            left_line.current_fit = left_line.previous_fits[-1]
            if not left_line.detected:
                left_line.drop_count += 1
            left_line.detected = False
        else:
            left_line.detected = True
            left_line.drop_count = 0

        if np.absolute(right_line.diffs[1]) > linear_coeff_threshold:
            right_line.current_fit = right_line.previous_fits[-1]
            if not right_line.detected:
                right_line.drop_count += 1
            right_line.detected = False
        else:
            right_line.detected = True
            right_line.drop_count = 0

        # When the number of consecutive drops reaches the specified
        # threshold, a complete new sliding-window search is executed.
        if (left_line.drop_count >= window_reset_threshold) | (
                right_line.drop_count >= window_reset_threshold):
            # reset the drop count
            left_line.drop_count = 0
            right_line.drop_count = 0

            # New window search
            left_line.current_fit, right_line.current_fit, left_temp, right_temp = detect_and_fit_lane(
                binary_warped, None, None)

            # The new window search may itself be unusable;
            # in that case the previous fit is used instead.
            if (left_line.current_fit is None):
                left_line.current_fit = left_line.previous_fits[-1]
            else:
                left_line.allxy = left_temp

            if (right_line.current_fit is None):
                right_line.current_fit = right_line.previous_fits[-1]
            else:
                right_line.allxy = right_temp

            left_line.detected = True
            right_line.detected = True

            # Clear all the historical fit since a new fit is used
            left_line.previous_fits = []
            right_line.previous_fits = []

        # Discard the first element of the history list to
        # make room for the most recent fit coeff.
        if len(left_line.previous_fits) == smoothing_factor:
            left_line.previous_fits.pop(0)
            right_line.previous_fits.pop(0)

        # Append recent fit coeff. to the history list
        left_line.previous_fits.append(left_line.current_fit)
        right_line.previous_fits.append(right_line.current_fit)

        # Smooth the coeff. by averaging all values in the history list
        if len(left_line.previous_fits) == 1:
            left_line.best_fit = left_line.previous_fits[0]
            right_line.best_fit = right_line.previous_fits[0]
        else:
            left_line.best_fit = np.vstack(
                left_line.previous_fits).mean(axis=0)
            right_line.best_fit = np.vstack(
                right_line.previous_fits).mean(axis=0)

        # Get bird's-eye view with marked lane
        marked = mark_lane(binary_warped, left_line.best_fit,
                           right_line.best_fit, left_line.allxy,
                           right_line.allxy, left_line.detected,
                           right_line.detected)

        # Print best fit coeff.
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(
            marked,
            'L-fit: ' + str(np.round(left_line.best_fit, 3).astype(str)),
            (50, 100), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(
            marked,
            'R-fit: ' + str(np.round(right_line.best_fit, 3).astype(str)),
            (690, 100), font, 1, (255, 255, 255), 2, cv2.LINE_AA)

        # Overlay detected lane on perspective image
        lane_warped = transform_single_point_perspective(
            undist, left_line.current_fit, right_line.current_fit,
            left_line.allxy, right_line.allxy)

        # Compute lane curvature and vehicle offset
        left_line.radius, right_line.radius, vehicle_pos = compute_lane_curvature(
            left_line.current_fit, right_line.current_fit)

        # Print radius for left and right line
        cv2.putText(
            lane_warped, 'Left Curv. : ' + str(int(left_line.radius)) +
            ' | Right Curv. : ' + str(int(right_line.radius)), (80, 100), font,
            1.5, (230, 255, 122), 3, cv2.LINE_AA)

        # Print vehicle position relative to lane center
        if vehicle_pos < 0:
            cv2.putText(
                lane_warped, 'Vehicle is ' +
                str(np.round(np.absolute(vehicle_pos), 3)) +
                'm left of lane center.', (80, 160), font, 1.5,
                (230, 255, 122), 3, cv2.LINE_AA)
        else:
            cv2.putText(
                lane_warped, 'Vehicle is ' +
                str(np.round(np.absolute(vehicle_pos), 3)) +
                'm right of lane center.', (80, 160), font, 1.5,
                (230, 255, 122), 3, cv2.LINE_AA)

        # Create a collage of images as a single frame for final video
        collage = np.zeros([720, 1280 + 427, 3], dtype='uint8')
        collage[0:720, 0:1280, :] = lane_warped
        collage[0:240, 1280:, :] = cv2.cvtColor(
            cv2.resize(combined_binary, (427, 240),
                       interpolation=cv2.INTER_AREA) * 255, cv2.COLOR_GRAY2RGB)
        collage[240:480, 1280:, :] = cv2.resize(marked, (427, 240),
                                                interpolation=cv2.INTER_AREA)
        collage[480:720, 1280:, :] = cv2.resize(warped, (427, 240),
                                                interpolation=cv2.INTER_AREA)

        plt.imsave(dstdir + "/" + os.path.basename(raw_imgs[i]),
                   collage,
                   format="jpeg")

    # Compile video
    clip = ImageSequenceClip(dstdir, fps=25)
    clip.write_videofile(dstdir + '.mp4')
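
A hypothetical invocation; the directory names are illustrative, and the calibration globals (mtx, dist) and helper functions are assumed to be defined. It reads jpg frames from the source folder, writes annotated collage frames to the destination folder, then compiles <dstdir>.mp4 at 25 fps:

process_imgs('frames_raw', 'frames_out')
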
TIMELAPSE_FOLDER = "timelapses"
OUTPUT_FOLDER = "video"

if __name__ == "__main__":
    for plant_folder in os.listdir(TIMELAPSE_FOLDER):
        print(plant_folder)
        image_filename_list = []
        last_date = None
        # sort: os.listdir order is arbitrary, and the gap-filling logic
        # below needs the ISO-dated filenames in chronological order
        for image_filename in sorted(
                os.listdir(os.path.join(TIMELAPSE_FOLDER, plant_folder))):
            date = datetime.strptime(image_filename[:-4],
                                     "%Y-%m-%dT%H:%M:%S.%fZ")
            if last_date is not None:
                dt = date - last_date
                # Pad gaps: repeat the next available frame once per missing
                # day so playback stays at one frame per day.
                while dt > timedelta(days=1):
                    image_filename_list.append(
                        os.path.join(TIMELAPSE_FOLDER, plant_folder,
                                     image_filename))
                    dt -= timedelta(days=1)
            image_filename_list.append(
                os.path.join(TIMELAPSE_FOLDER, plant_folder, image_filename))
            last_date = date
        print(image_filename_list)
        clip = ImageSequenceClip(image_filename_list, fps=10)
        # clip.write_gif(os.path.join(OUTPUT_FOLDER, plant_folder + ".gif"))
        clip.write_videofile(filename=os.path.join(OUTPUT_FOLDER,
                                                   plant_folder + ".mp4"),
                             codec="libx264",
                             bitrate="1000000",
                             audio=False)
Example #7
def concatenate_frames_to_clip(frames_path, fps=40):
    frames = os.listdir(frames_path)
    frames = sorted(frames, key=lambda x: get_index(x))
    image_sequence = [os.path.join(frames_path, frame) for frame in frames]
    clip = ImageSequenceClip(image_sequence, fps=fps)
    clip.write_videofile("image_clip.mp4")
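concatenate_frames_to_clip relies on a get_index helper that is not shown. A minimal sketch, assuming frame filenames embed a counter such as frame_12.png (the regex parsing is an assumption):

import re

def get_index(filename):
    # Assumed helper: pull the first integer out of the filename so frames
    # sort numerically (frame_2 before frame_10) rather than lexically.
    match = re.search(r'\d+', filename)
    return int(match.group()) if match else -1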
Example #8
            from scipy import signal
            # lfilter over the reversed reward sequence implements the
            # discounted-return recursion; reversing back restores order.
            y = signal.lfilter(b, a, x=r)
            return y[::-1]

        true_discounted_returns = alt(episode_reward_record, 0.99)

        for i in range(1, len(episode_reward_record)):
            return_buffer.append([
                replay_buffer[-i][0], replay_buffer[-i][1],
                true_discounted_returns[-i]
            ])

        if len(render_buffer) > 0:
            from moviepy.editor import ImageSequenceClip

            clip = ImageSequenceClip(render_buffer, fps=5)
            clip.write_gif("IQNViz3/ep" + str(ep_id) + '.gif', fps=5)
            render_buffer = []

        episode_reward_record = []
        num_steps = 0
        ep_id += 1

    if len(replay_buffer) >= batch_sz:
        batch = random.sample(replay_buffer, batch_sz)

        x_batch = [i[0] for i in batch]
        a_batch = [i[1] for i in batch]
        r_batch = [i[2] for i in batch]
        x_p_batch = [i[3] for i in batch]
        t_batch = [not i[4] for i in batch]
Example #9
def make_video(img_folder, out="video.mp4"):
    # sorted: os.listdir order is arbitrary, so frames could otherwise be shuffled
    imgs = sorted(os.path.join(img_folder, img) for img in os.listdir(img_folder))

    clip = ImageSequenceClip(imgs * 5, fps=10)
    clip.write_videofile(out)
Example #10
# create a video clip from a bunch of images
from moviepy.editor import ImageSequenceClip

clip = ImageSequenceClip('test', fps=3)
clip.write_videofile('crowdai.mp4', audio=False)
Example #11
def main(arguments: List[str]) -> int:

    args = parser.parse_args(arguments)

    sequence_list = os.listdir(args.root)

    os.makedirs(args.output_dir, exist_ok=True)

    max_videos = args.max_videos
    for seq_name in sequence_list[:max_videos]:
        seq_path = os.path.join(args.root, seq_name)
        df = pd.read_csv(seq_path)
        count = 0
        time_list = np.sort(np.unique(df["TIMESTAMP"].values))

        # Get API for Argo Dataset map
        avm = ArgoverseMap()
        city_name = df["CITY_NAME"].values[0]
        seq_lane_bbox = avm.city_halluc_bbox_table[city_name]
        seq_lane_props = avm.city_lane_centerlines_dict[city_name]

        x_min = min(df["X"])
        x_max = max(df["X"])
        y_min = min(df["Y"])
        y_max = max(df["Y"])

        lane_centerlines = []
        # Get lane centerlines which lie within the range of trajectories
        for lane_id, lane_props in seq_lane_props.items():

            lane_cl = lane_props.centerline

            if (np.min(lane_cl[:, 0]) < x_max and np.min(lane_cl[:, 1]) < y_max
                    and np.max(lane_cl[:, 0]) > x_min
                    and np.max(lane_cl[:, 1]) > y_min):
                lane_centerlines.append(lane_cl)

        seq_out_dir = os.path.join(args.output_dir, seq_name.split(".")[0])

        for time in time_list:

            plt.xlim(x_min, x_max)
            plt.ylim(y_min, y_max)

            df_cur = df.loc[df["TIMESTAMP"] <= time]

            viz_sequence(df_cur,
                         lane_centerlines=lane_centerlines,
                         show=False,
                         smoothen=False)

            os.makedirs(seq_out_dir, exist_ok=True)

            plt.savefig(os.path.join(seq_out_dir, f"{count}.png"),
                        bbox_inches="tight",
                        pad_inches=0)
            plt.close()
            count += 1

        from moviepy.editor import ImageSequenceClip

        img_idx = sorted(
            [int(x.split(".")[0]) for x in os.listdir(seq_out_dir)])
        list_video = [f"{seq_out_dir}/{x}.png" for x in img_idx]
        clip = ImageSequenceClip(list_video, fps=10)
        video_path = os.path.join(args.output_dir,
                                  f"{seq_name.split('.')[0]}.mp4")
        clip.write_videofile(video_path)
        shutil.rmtree(seq_out_dir)
    return 0
Example #12
def main():
    """Make a jazz noise here"""

    args = get_args()

    # Variables

    # Directory containing tarred or foldered Landsat data
    # indir = r'D:\lansat\Bulk Order Large_lansat_8\test'

    # How strict the cloud recognition should be; the lower, the more strict
    # how_strict= 0.7


    # GPS bounding box for the sampling area, [xmin, ymin, xmax, ymax].
    # Note: gdal.Translate's projWin wants [xmin, ymax, xmax, ymin] (see below).

    # x1 = 160785
    # y1 = 3467622
    # x2 = 169515
    # y2 = 3462902

    bb = args.bounding_box

    xmin = int(bb[0])
    ymin = int(bb[1])
    xmax = int(bb[2])
    ymax = int(bb[3])

    #--------------------------------------------------------------------------

    # -Main-


    # If there are tars still in the directory, grab and untar them

    tars = glob.glob(os.path.join(args.indir, '*.tar'))
    print('Extracting tar files...')

    # if len(tars) > 1:
    for tar in tars:
        out_file = tar.split('.')[0]


        # make a folder for the extracted tar contents to land in
        if not os.path.exists(out_file):
            os.makedirs(out_file)
        one_tar = tarfile.open(tar)
        one_tar.extractall(out_file)
            
            
    # Function to take a Landsat image and crop it to the sample area in Baker County, GA

    lv1 = glob.glob(os.path.join(args.indir, '*'))
    # print(lv1)

    print('Extraction complete, cropping Landsat images...')
    for folder in lv1:

        if os.path.isdir(folder):
            folder_name = os.path.basename(folder)


            if folder_name.startswith('L'):
                cnt = 1
                # print(os.path.join(folder, '*.TIF'))
                TIFs = glob.glob(os.path.join(folder, '*.TIF'))
                # print(TIFs)

                date = folder.split("_")[-4]

                base_outdir = os.path.join(args.indir, date)
                outdir = base_outdir

                while os.path.isdir(outdir):
                    outdir = f'{base_outdir}_{cnt}'
                    cnt += 1

                os.mkdir(outdir)

                # Iterating through the image list
                for im in TIFs:
                    # print(im)

                    split = im.split('_')
                    date = split[-6]
                    band = split[-1]
                    filename = date+'_'+band



            #         print(filename)

                    # Opening each one in GDAL
                    img = gdal.Open(im)

                    # print('translating')
                    # Need to add -a_ullr xmin ymax xmax ymin

                    # large extent
                    # 160894,3459609,173729,3468590
                    # QGIS copy seems to be in xmin ymin xmax ymax
                    # We need to change the input to reflect that for ease of use
                    gdal.Translate(os.path.join(outdir, filename), img, projWin = [xmin, ymax, xmax, ymin])

    lv2 = glob.glob(os.path.join(args.indir, '*'))

    print('Creating cloudy and clear directories for sorting...')

    if not os.path.exists(os.path.join(args.indir, 'cloudy')):
        os.makedirs(os.path.join(args.indir, 'cloudy'))

    if not os.path.exists(os.path.join(args.indir, 'clear')):
        os.makedirs(os.path.join(args.indir, 'clear'))

    if not os.path.exists(os.path.join(args.indir, 'NDWI')):
        os.makedirs(os.path.join(args.indir, 'NDWI'))
    
    print('Scanning for cloudcover and running NDWI analysis...')

    for date_folder in lv2:

        if (os.path.isdir(date_folder)):
            date = os.path.basename(date_folder)
            
            
            if not date.startswith('L'):
                band1_imgs = glob.glob(os.path.join(date_folder , '*B1.TIF'))      

                for img in band1_imgs:
                    filename = os.path.basename(img)
    #                     print(img)
    #                     pil_im = Image.open(img)
    #                     display(pil_im)
                    # Open it, histogram mean, sort
                    testing_img = cv2.imread(img)
                    testing_vals = testing_img.mean(axis=2).flatten()
                    testing_mode = statistics.mode(testing_vals)
                    testing_average = statistics.mean(testing_vals)

                    # print('Testing mode: ',testing_mode)
                    # print('Testing Average: ', testing_average)
                    if testing_mode == 0.0:
                        # print("Black image")
                        pass  # black frame; the testing_average < 10 check below catches it

                    if (len([1 for i in testing_vals if i > testing_mode]) >= len(testing_vals)*args.how_strict) or (testing_average > 35) or (testing_average < 10):
                        # print("Cloudy image")

                        # print(date)
                        try:
                            shutil.move(os.path.join(args.indir, date), os.path.join(args.indir, 'cloudy'))
                        except:
                            continue
                    else:
                        # print("Clear image")
                        # print(date)

                        # do NDWI Then move


                        # Calculation: index = (NIR - green) / (NIR + green),
                        # using Landsat bands 3 (green) and 5 (NIR)
                        band3 = glob.glob(os.path.join(date_folder, '*B3.TIF'))
                        band5 = glob.glob(os.path.join(date_folder, '*B5.TIF'))
                        b3 = rio.open(band3[0])
                        b5 = rio.open(band5[0])
                        green = b3.read()
                        nir = b5.read()
                        ndwi = (nir.astype(float)-green.astype(float))/(nir+green)
                        print(type(ndwi))




                        # Plotting
                        fig, ax = plt.subplots(1, figsize=(12, 10))
                        show(ndwi, ax=ax, cmap="coolwarm_r")
                        # testing
                        plt.axis('off')

                        plt.savefig(os.path.join(date_folder, date + '_NDWI.TIF'), bbox_inches = 'tight')
                        plt.savefig(os.path.join(args.indir, 'NDWI' , date + '_NDWI.TIF'), bbox_inches = 'tight')
                        
                        b3.close()
                        b5.close()

                        try:
                            shutil.move(os.path.join(args.indir,  date), os.path.join(args.indir, 'clear'))
                        except:
                            continue


    ndwi_TIFs = glob.glob(os.path.join(args.indir, 'NDWI', '*.TIF'))

   
    for i in ndwi_TIFs:
        # Add coordinates
        img = gdal.Open(i)
        gdal.Translate(i.replace('.TIF', '_reprojected.TIF'), img, outputBounds = [xmin, ymax, xmax, ymin])

        # Labeling
        # pic_name = os.path.basename(i)
        # pic_name = pic_name.replace('.TIF', '')
        # img = cv2.imread(i)
        # height, width, channels = img.shape
        # font = cv2.FONT_HERSHEY_SIMPLEX
        # cv2.putText(img,pic_name,(int(width/2),height, font, 1,(0,0,0),2))
        # cv2.imwrite(i.replace('.TIF', '_labeled.TIF'), img)

    # ndwi_TIFs_labeled = glob.glob(os.path.join(args.indir, 'NDWI', '*labeled.TIF'))
    # 0.2 fps shows each NDWI frame for five seconds
    clip = ImageSequenceClip(ndwi_TIFs, fps=0.2)
    clip.write_gif(os.path.join(args.indir, 'NDWI', 'final.gif'))


    print(f'Finished analysis, find NDWI outputs at {os.path.join(args.indir, "NDWI")}.')

# --------------------------------------------------
if __name__ == '__main__':
    main()
Example #13
        print(y_pos.shape)
        print(pol.shape)

        (timestamps, x_pos,
         y_pos, pol) = dvsproc.clean_up_events(timestamps, x_pos,
                                               y_pos, pol, window=1000)

        frames, fs, _ = dvsproc.gen_dvs_frames(timestamps, x_pos, y_pos,
                                               pol, num_frames, fs=3)
        print "Length of produced frames: ", len(frames)
        new_frames = []
        for frame in frames:
            tmp_frame = (((frame+fs)/float(2*fs))*255).astype(np.uint8)
            new_frames.append(tmp_frame)

        clip = ImageSequenceClip(new_frames, fps=20)
        clip.write_gif(seq_save_path, fps=30)

        print "Sequence %s is saved at %s" % (img_name, seq_save_path)
elif option == "caltech256-ps":
    caltech_fn = "INI_Caltech256_10fps_20160424.hdf5"
    caltech_path = os.path.join(data_path, caltech_fn)
    caltech_db = h5py.File(caltech_path, mode="r")
    caltech_stats_path = os.path.join(stats_path, "caltech256_stats.pkl")
    caltech_save_path = os.path.join(data_path, "caltech256_ps.eps")
    img_num = 60

    with open(caltech_stats_path, mode="rb") as f:  # pickle needs binary mode
        caltech_stats = pickle.load(f)
Example #14
def run(outdir, train_mode):

    # Build network.
    initializer = tf.keras.initializers.VarianceScaling()
    X = tf.placeholder(tf.float32, shape=[None, n_inputs])
    hidden = tf.layers.dense(X,
                             N_HIDDEN,
                             activation=tf.nn.elu,
                             kernel_initializer=initializer)
    logits = tf.layers.dense(hidden, n_outputs)
    outputs = tf.nn.sigmoid(logits)  # probability of action 0 (left)
    p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
    action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)

    # Optimizer, gradients.
    y = 1. - tf.to_float(action)
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y,
                                                            logits=logits)
    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
    grads_and_vars = optimizer.compute_gradients(cross_entropy)
    gradients = [grad for grad, variable in grads_and_vars]
    gradient_placeholders = []
    grads_and_vars_feed = []
    for grad, variable in grads_and_vars:
        gradient_placeholder = tf.placeholder(tf.float32,
                                              shape=grad.get_shape())
        gradient_placeholders.append(gradient_placeholder)
        grads_and_vars_feed.append((gradient_placeholder, variable))
    training_op = optimizer.apply_gradients(grads_and_vars_feed)

    # For TensorBoard.
    episode_reward = tf.placeholder(dtype=tf.float32, shape=[])
    tf.summary.scalar('reward', episode_reward)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    if train_mode:
        hp_save_dir = hp_directory(outdir)
        with tf.Session() as sess:
            init.run()
            # For TensorBoard.
            print('hp_save_dir:', hp_save_dir)
            train_writer = tf.summary.FileWriter(hp_save_dir, sess.graph)
            for iteration in range(n_iterations):
                all_rewards = []
                all_gradients = []
                for game in range(N_GAMES_PER_UPDATE):
                    current_rewards = []
                    current_gradients = []
                    obs = env.reset()
                    for step in range(n_max_steps):
                        action_val, gradients_val = sess.run(
                            [action, gradients],
                            feed_dict={X: obs.reshape(1, n_inputs)})
                        obs, reward, done, info = env.step(action_val[0][0])
                        current_rewards.append(reward)
                        current_gradients.append(gradients_val)
                        if done:
                            break
                    all_rewards.append(current_rewards)
                    all_gradients.append(current_gradients)
                avg_reward = np.mean(([np.sum(r) for r in all_rewards]))

                print('\rIteration: {}, Reward: {}'.format(iteration,
                                                           avg_reward),
                      end='')
                all_rewards = discount_and_normalize_rewards(
                    all_rewards, discount_rate=DISCOUNT_RATE)
                feed_dict = {}
                for var_index, gradient_placeholder in enumerate(
                        gradient_placeholders):
                    mean_gradients = np.mean([
                        reward * all_gradients[game_index][step][var_index]
                        for game_index, rewards in enumerate(all_rewards)
                        for step, reward in enumerate(rewards)
                    ],
                                             axis=0)
                    feed_dict[gradient_placeholder] = mean_gradients
                sess.run(training_op, feed_dict=feed_dict)
                if iteration % save_iterations == 0:
                    print('Saving model to ', hp_save_dir)
                    model_file = '{}/my_policy_net_pg.ckpt'.format(hp_save_dir)
                    saver.save(sess, model_file)
                    # Also save event files for TB.
                    merge = tf.summary.merge_all()
                    summary = sess.run(merge,
                                       feed_dict={episode_reward: avg_reward})
                    train_writer.add_summary(summary, iteration)
            obs = env.reset()
            steps = []
            done = False
    else:  # Make a gif.
        from moviepy.editor import ImageSequenceClip
        model_file = '{}/my_policy_net_pg.ckpt'.format(outdir)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, save_path=model_file)
            # Run model.
            obs = env.reset()
            done = False
            steps = []
            rewards = []
            while not done:
                s = env.render('rgb_array')
                steps.append(s)
                action_val = sess.run(action,
                                      feed_dict={X: obs.reshape(1, n_inputs)})
                obs, reward, done, info = env.step(action_val[0][0])
                rewards.append(reward)
            print('Final reward :', np.mean(rewards))
        clip = ImageSequenceClip(steps, fps=30)
        clip.write_gif('cartpole.gif', fps=30)
Example #15
def get_output(video_path,
               out_filename,
               label,
               fps=30,
               font_scale=0.5,
               font_color='white',
               target_resolution=None,
               resize_algorithm='bicubic',
               use_frames=False):
    """Get demo output using ``moviepy``.

    This function will generate video file or gif file from raw video or
    frames, by using ``moviepy``. For more information on some parameters,
    you can refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path or the rawframes directory path.
            If ``use_frames`` is set to True, it should be rawframes directory
            path. Otherwise, it should be video file path.
        out_filename (str): Output filename for the generated file.
        label (str): Predicted label of the generated file.
        fps (int): Number of picture frames to read per second. Default: 30.
        font_scale (float): Font scale of the label. Default: 0.5.
        font_color (str): Font color of the label. Default: 'white'.
        target_resolution (None | tuple[int | None]): Set to
            (desired_width, desired_height) to have resized frames. If either
            dimension is None, the frames are resized by keeping the existing
            aspect ratio. Default: None.
        resize_algorithm (str): Support "bicubic", "bilinear", "neighbor",
            "lanczos", etc. Default: 'bicubic'. For more information,
            see https://ffmpeg.org/ffmpeg-scaler.html
        use_frames (bool): Whether to use rawframes as input. Default: False.
    """

    if video_path.startswith(('http://', 'https://')):
        raise NotImplementedError

    try:
        from moviepy.editor import ImageSequenceClip
    except ImportError:
        raise ImportError('Please install moviepy to enable output file.')

    # Channel Order is BGR
    if use_frames:
        frame_list = sorted(
            [osp.join(video_path, x) for x in os.listdir(video_path)])
        frames = [cv2.imread(x) for x in frame_list]
    else:
        video = decord.VideoReader(video_path)
        frames = [x.asnumpy()[..., ::-1] for x in video]

    if target_resolution:
        w, h = target_resolution
        frame_h, frame_w, _ = frames[0].shape
        if w == -1:
            w = int(h / frame_h * frame_w)
        if h == -1:
            h = int(w / frame_w * frame_h)
        frames = [cv2.resize(f, (w, h)) for f in frames]

    textsize = cv2.getTextSize(label, cv2.FONT_HERSHEY_DUPLEX, font_scale,
                               1)[0]
    textheight = textsize[1]
    padding = 10
    location = (padding, padding + textheight)

    if isinstance(font_color, str):
        font_color = webcolors.name_to_rgb(font_color)[::-1]

    frames = [np.array(frame) for frame in frames]
    for frame in frames:
        cv2.putText(frame, label, location, cv2.FONT_HERSHEY_DUPLEX,
                    font_scale, font_color, 1)

    # RGB order
    frames = [x[..., ::-1] for x in frames]
    video_clips = ImageSequenceClip(frames, fps=fps)

    out_type = osp.splitext(out_filename)[1][1:]
    if out_type == 'gif':
        video_clips.write_gif(out_filename)
    else:
        video_clips.write_videofile(out_filename, remove_temp=True)
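
A hypothetical call, assuming decord and webcolors are installed; the paths and label are illustrative:

# Burn the label into every frame of demo.mp4 and write an animated gif.
get_output('demo.mp4', 'demo_labeled.gif', label='dancing',
           fps=24, font_scale=0.6, font_color='yellow')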
Example #16
frames = []
for episode in range(3):
    done = False
    state = env.reset()
    frame = env.render()
    frames.append(np.asarray(frame, dtype="int32"))
    while not done:
        tensor_state = torch.FloatTensor(state).unsqueeze(0).to(device)
        action = q_net.select_greedyaction(tensor_state)
        print(action)
        done, reward = env.step(action)
        state = env.player
        frame = env.render()
        frames.append(np.asarray(frame, dtype="int32"))

clip = ImageSequenceClip(frames, fps=20)
clip.write_gif('test.gif', fps=20)

plt.figure(1)
plt.title('Performance over learning (DQN)')
plt.plot(episode_rewards[:, 0], episode_rewards[:, 1])
plt.xlabel('time steps')
plt.ylabel('total reward')

plt.figure(2)
plt.title('Performance on Test Env (DQN)')
xv = np.arange(EVAL_EVERY - 1, N_EPISODES + 1, EVAL_EVERY)
plt.plot(episode_rewards[xv, 0], episode_rewards[xv, 2], ':o')
plt.xlabel('time steps')
plt.ylabel('expected total reward (greedy policy)')
plt.show()
Example #17
    plt.ylabel('Cases')

    plt.legend(['Susceptible', 'Infectious', 'Dead', 'Recovered'])

    plt.subplot(224)
    plt.plot(Stat2.index, Stat2['Healthy'], 'b')
    plt.plot(Stat2.index, Stat2['infected'], 'r')
    plt.plot(Stat2.index, Stat2['Dead'], 'k')
    plt.plot(Stat2.index, Stat2['Cured'], 'g')
    plt.xlabel('Days')
    plt.ylabel('Cases')

    plt.legend(['Susceptible', 'Infectious', 'Dead', 'Recovered'])

    #data=np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    fig.canvas.draw()
    #data=np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    #plt.show()

    ims.append(data)
    plt.clf()
    plt.cla()

from moviepy.editor import ImageSequenceClip  # Somehow doesn't work if imported at the top; possible conflict with matplotlib

clip = ImageSequenceClip(ims, fps=3)
clip.write_gif('simulation.gif', fps=3)
clip.write_videofile('simulation.mp4', fps=3)
Example #18
File: utils.py Project: vanstrn/MV
    def SaveGIF(self, clip, name, fps=10):
        clip = ImageSequenceClip(clip, fps=fps)
        clip.write_gif("{}/images/{}.gif".format(self.LOG_PATH, name), fps=fps)
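
A hypothetical call, assuming logger is an instance of the class this method belongs to, with LOG_PATH pointing at a directory that contains an images/ subfolder:

# frame_list: list of HxWx3 uint8 frames; writes {LOG_PATH}/images/episode0.gif
logger.SaveGIF(frame_list, "episode0", fps=15)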
Example #19
        # write video stream
        vidfile.write(frame)

        if frame_id >= 1000:
            print("recorded 1000 frames")
            break

        print("%.3f %d %.3f %d %d(ms)" %
              (ts, frame_id, angle, btn, int((time.time() - ts) * 1000)))

if fpv_video:
    textColor = (255, 255, 255)
    bgColor = (0, 0, 0)
    for i in range(len(frame_arr)):
        newImage = Image.new('RGBA', (100, 20), bgColor)
        drawer = ImageDraw.Draw(newImage)
        drawer.text((0, 0), "Frame #{}".format(i), fill=textColor)
        drawer.text((0, 10), "Angle:{}".format(angle_arr[i]), fill=textColor)
        newImage = cv2.cvtColor(np.array(newImage), cv2.COLOR_BGR2RGBA)
        frame_arr[i] = cm.overlay_image(frame_arr[i],
                                        newImage,
                                        x_offset=0,
                                        y_offset=0)

    # Create a video from the collected frames
    clip = ImageSequenceClip(frame_arr, fps=30)
    clip.write_videofile('fpv-video.mp4')

print("Finish..")
turn_off()
image_dir = '/home/mia/backup/research/autonomous_driving/cityscapes/dataset/val_images'
image_list = os.listdir(image_dir)
image_list.sort()
print(f'{len(image_list)} frames found')

test = load_img(f'{image_dir}/{image_list[1]}')
test = img_to_array(test)
pipeline(test, video=False)

for image_dir in ['stuttgart_00', 'stuttgart_01', 'stuttgart_02']:
    os.mkdir(f'outputs/{image_dir}')
    image_list = os.listdir(image_dir)
    image_list.sort()
    print(f'{len(image_list)} frames found')
    for i in tqdm(range(len(image_list))):
        try:
            test = load_img(f'{image_dir}/{image_list[i]}')
            test = img_to_array(test)
            segmap = pipeline(test,
                              video=False,
                              fname=f'{image_list[i]}',
                              folder=image_dir)
            if segmap is False:
                break
        except Exception as e:
            print(str(e))
    clip = ImageSequenceClip(sorted(glob(f'outputs/{image_dir}/*')),
                             fps=18,
                             load_images=True)
    clip.write_videofile(f'{image_dir}.mp4')
Example #21
def make_movie(imgs, filename):
    clip = ImageSequenceClip(imgs, fps=int(30 / args.frame_repeat))
    clip.write_videofile(filename)
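
A hypothetical call; note the function reads args.frame_repeat from an enclosing scope (with frame_repeat=3 this yields 10 fps):

# imgs: list of HxWx3 uint8 frames from one played episode
make_movie(imgs, 'episode.mp4')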
Example #22
def main():
    logging.basicConfig(
        format='%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
    log = logging.getLogger("TimeLapse")
    log.setLevel(LogLevel)
    log.info("TimeLapse Starting!")

    #Read in the Cameras!
    camfile = open("cams.json")
    cameras = json.load(camfile)
    camfile.close()

    for x in cameras.keys():
        try:
            os.mkdir(x)
            os.mkdir(x + "-mp4")
        except Exception as e:
            log.error("Could not create folder " + x + "!")
            log.error("Exception: " + str(e))
        log.info("Added Camera " + x)
        if cameras[x]["type"] == "etag":
            cameras[x]["instance"] = ETagDownload(cameras[x]["url"], x)
        if cameras[x]["type"] == "max-age":
            cameras[x]["instance"] = MaxAgeDownload(cameras[x]["url"], x)
        if cameras[x]["type"] == "max-age-etag":
            cameras[x]["instance"] = MaxAgeETagDownload(cameras[x]["url"], x)
        cameras[x]["instance"].start()

    while True:
        command = input(">")
        if command == "list-cams":
            for x in cameras.keys():
                log.info(x + ": Running? " +
                         str(cameras[x]["instance"].is_running))
        elif command.startswith("stopcam"):
            cam = command.split(" ")[1]
            if not cam in cameras.keys():
                log.error("Invalid Camera name")
            else:
                log.info("Camera " + cam + " stopped!")
                cameras[cam]["instance"].stop()

        elif command.startswith("startcam"):
            cam = command.split(" ")[1]
            if not cam in cameras.keys():
                log.error("Invalid Camera name")
            else:
                log.info("Camera " + cam + " started!")
                cameras[cam]["instance"].start()
        elif command.startswith("save"):
            for x in cameras.keys():
                cameras[x]["instance"].stop()
            log.info("All Camers Stopped!")
            for x in cameras.keys():
                log.info("Saving " + x)
                data = livejson.File(x + "-mp4/cat.json")
                t = str(int(time.time()))
                q = ImageSequenceClip(x, fps=cameras[x]["fps"])
                q.write_videofile(x + "-mp4/" + t + ".mp4")
                data[t] = glob(x + "/*.jpg")
                for y in glob(x + "/*.jpg"):
                    os.remove(y)
            log.info("All Cameras started!")
            for x in cameras.keys():
                cameras[x]["instance"].start()
        elif command.startswith("stopcams"):
            for x in cameras.keys():
                cameras[x]["instance"].stop()
            log.info("All Camers Stopped!")
        elif command.startswith("startcams"):
            for x in cameras.keys():
                cameras[x]["instance"].start()
            log.info("All Camers Stopped!")
        elif command == "stop":
            log.info("Stopping all cameras and halting!")
            for x in cameras.keys():
                cameras[x]["instance"].stop()
            break
        elif command == "help":
            log.info("help: Shows this.")
            log.info("list-cams: Lists the current cameras.")
            log.info("stopcam: Stops a cameras recording.")
            log.info("startcam: Starts a camera recording.")
            log.info("stop: Stops this program.")

        else:
            log.error("Command not found! " + command)
Example #23
def get_output(video_path,
               out_filename,
               label,
               fps=30,
               font_size=20,
               font_color='white',
               target_resolution=None,
               resize_algorithm='bicubic',
               use_frames=False):
    """Get demo output using ``moviepy``.

    This function will generate video file or gif file from raw video or
    frames, by using ``moviepy``. For more information on some parameters,
    you can refer to: https://github.com/Zulko/moviepy.

    Args:
        video_path (str): The video file path or the rawframes directory path.
            If ``use_frames`` is set to True, it should be rawframes directory
            path. Otherwise, it should be video file path.
        out_filename (str): Output filename for the generated file.
        label (str): Predicted label of the generated file.
        fps (int): Number of picture frames to read per second. Default: 30.
        font_size (int): Font size of the label. Default: 20.
        font_color (str): Font color of the label. Default: 'white'.
        target_resolution (None | tuple[int | None]): Set to
            (desired_width, desired_height) to have resized frames. If either
            dimension is None, the frames are resized by keeping the existing
            aspect ratio. Default: None.
        resize_algorithm (str): Support "bicubic", "bilinear", "neighbor",
            "lanczos", etc. Default: 'bicubic'. For more information,
            see https://ffmpeg.org/ffmpeg-scaler.html
        use_frames (bool): Whether to use rawframes as input. Default: False.
    """

    if video_path.startswith(('http://', 'https://')):
        raise NotImplementedError

    try:
        from moviepy.editor import (CompositeVideoClip, ImageSequenceClip,
                                    TextClip, VideoFileClip)
    except ImportError:
        raise ImportError('Please install moviepy to enable output file.')

    if use_frames:
        frame_list = sorted(
            [osp.join(video_path, x) for x in os.listdir(video_path)])
        video_clips = ImageSequenceClip(frame_list, fps=fps)
    else:
        # revert the order to suit ``VideoFileClip``:
        # (width, height) -> (height, width)
        if target_resolution is not None:
            target_resolution = (target_resolution[1], target_resolution[0])
        video_clips = VideoFileClip(video_path,
                                    target_resolution=target_resolution,
                                    resize_algorithm=resize_algorithm)

    duration_video_clip = video_clips.duration
    text_clips = TextClip(label, fontsize=font_size, color=font_color)
    text_clips = (text_clips.set_position(
        ('right', 'bottom'), relative=True).set_duration(duration_video_clip))

    video_clips = CompositeVideoClip([video_clips, text_clips])

    out_type = osp.splitext(out_filename)[1][1:]
    if out_type == 'gif':
        video_clips.write_gif(out_filename)
    else:
        video_clips.write_videofile(out_filename, remove_temp=True)