import itertools

from cv2 import VideoWriter, VideoWriter_fourcc, imshow, waitKey, imwrite


def simple_agent(env):
    """Trivial policy: launch the ball, then follow it with the paddle."""
    if not env.started:
        return 1
    ball_pos = env.ball.center()
    paddle_pos = env.paddle.center()
    if paddle_pos[1] < ball_pos[1]:
        return 2
    else:
        return 3


vid = VideoWriter('demo.avi', VideoWriter_fourcc(*"XVID"), float(30), (160, 210), False)
env = Breakout({
    'max_step': 1000,
    # 'lifes': 7,
    'ball_speed': [5, -2],
    # 'ball_size': [5, 5],
    # 'ball_color': 200,
    # 'paddle_width': 50,
    'paddle_speed': 5
})
for ep in range(1):
    obs = env.reset()
    for t in itertools.count():
        # action = random.randint(0, env.actions - 1)
def main(title: str):
    title = str(title)
    fps = config['fps']
    result, audio_url = crawler.crawler(title)
    width = config['width']
    height = config['height']

    # Download every image referenced by the crawler result.
    for key in result.keys():
        image_name = str(key)
        image_url = result[key]['image_url']
        image_dir = os.sep.join([".", "resource", title])
        crawler.save_image(image_url, image_dir, image_name)

    fourcc = VideoWriter_fourcc(*'mp4v')
    output_dir = os.sep.join(['.', 'output'])
    if not os.path.exists(output_dir):
        print("Folder", output_dir, 'does not exist. Creating...')
        os.makedirs(output_dir)
    video = VideoWriter(os.sep.join([output_dir, str(title) + '.mp4']), fourcc,
                        float(config['fps']), (config['width'], config['height']))

    font = ImageFont.truetype(config['font'], config['title_font_size'], encoding="utf-8")
    font2 = ImageFont.truetype(config['font'], config['content_font_size'], encoding="utf-8")
    title_wrapper = text_processing.Wrapper(font)
    content_wrapper = text_processing.Wrapper(font2)

    keys = list(result.keys())
    keys.append(0)
    keys.sort()
    keys.append(keys[len(keys) - 1] + 10)
    print(keys)

    frame = image_processing.create_blank_frame("", "", (width, height), title_wrapper,
                                                content_wrapper, font, font2)
    total_length = keys[len(keys) - 1] * fps
    index = 0
    for i in range(total_length):
        if index + 1 > len(keys) - 1:
            frame = image_processing.create_blank_frame(
                "", "", (width, height), title_wrapper, content_wrapper, font, font2)
        elif (i / fps) > keys[index + 1]:
            index += 1
            print(index, "out of", len(keys))
            key = keys[index]
            image = os.sep.join([
                '.', 'resource', title,
                str(key) + text_processing.find_image_suffix(result[key]['image_url'])
            ])
            header = result[key]['header']
            content = result[key]['content']
            print("Title:", header)
            if result[key]['image_suffix'] in ['.gif', '.GIF']:
                frame = image_processing.create_blank_frame(
                    header, content, (width, height), title_wrapper, content_wrapper,
                    font, font2)
            else:
                frame = image_processing.create_frame(image, header, content,
                                                      (width, height), title_wrapper,
                                                      content_wrapper, font, font2)
            os.remove(image)
        else:
            pass
        video.write(frame)
    video.release()
    print(title, "finished!")
from buffers import ReplayBuffer
import os
import pickle
from sys import stdout
import itertools

import cv2
from cv2 import VideoWriter, VideoWriter_fourcc
from PIL import Image
import numpy as np
import torch

width = 1024
height = 768
FPS = 60
fourcc = VideoWriter_fourcc(*'MP42')

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")


def set_seed(n_seed):
    """Seed NumPy and PyTorch (and CUDA, when available) for reproducibility."""
    np.random.seed(n_seed)
    torch.manual_seed(n_seed)
    if use_cuda:
        torch.cuda.manual_seed(n_seed)


#-------------------------------------------------------------
#
# Classes
def save_images(): # Checks to see if the folder specified from the variables module exists and if not, creates it if not os.path.exists('../images/saved/' + v.FOLDER_NAME): os.makedirs('../images/saved/' + v.FOLDER_NAME) # checks to see if the .txt file that stores the image index exists, if not, it creates it, if so, it increments it if os.path.isfile('../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_index.txt'): file = open( '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_index.txt', 'r') ind = int(file.read()) file.close() ind = ind + 1 file = open( '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_index.txt', 'w') file.write(str(ind)) file.close() else: file = open( '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_index.txt', 'x') file.close() file = open( '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_index.txt', 'w') ind = 1 file.write(str(ind)) file.close() filename_head = '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_' + str( ind) if v.frame_amount is 1: # Saving of all the images stored in the variables module imwrite(filename_head + '_img_1.jpg', v.img1[0]) imwrite(filename_head + '_warp_1.jpg', v.warp1[0]) imwrite(filename_head + '_mask_1.jpg', v.mask1[0]) if v.reflection_point_tracking: imwrite(filename_head + '_img_2.jpg', v.img2[0]) imwrite(filename_head + '_warp_2.jpg', v.warp2[0]) imwrite(filename_head + '_mask_2.jpg', v.mask2[0]) else: mask_filename = filename_head + '_mask_1.avi' warp_filename = filename_head + '_warp_1.avi' mask_vw = VideoWriter(mask_filename, VideoWriter_fourcc('M', 'J', 'P', 'G'), 15, (500, 500)) warp_vw = VideoWriter(warp_filename, VideoWriter_fourcc('M', 'J', 'P', 'G'), 15, (500, 500)) for i in range(v.frame_amount): mask_vw.write(v.mask1[i]) warp_vw.write(v.warp1[i]) mask_vw.release() warp_vw.release() playback = VideoCapture(filename_head + '_mask_1.avi') if (playback.isOpened() == False): print("Error opening video stream or file") images_loop = [] ret = True while ret: ret, frame = playback.read() if ret: images_loop.append(frame) for frame in itr.cycle(images_loop): imshow('Playback', frame) k = waitKey(40) if k is ord(' '): break playback.release() # Data list as strings that will be passed into the .csv file data = [ str(ind), str(v.x_guide), str(v.zone_x), str(v.zone_y), str(v.point1), str(v.point2), str(v.point_adjust), str(datetime.datetime.now()), v.FREQUENCY, v.REFLECTION_ANGLE, v.REFLECTION_DISTANCE, v.FOLDER_NAME, v.EXPERIMENT_NAME, v.INSTANCE_ONE_NAME, v.INSTANCE_TWO_NAME, str(v.reflection_point_tracking), str(v.frame_amount) ] # Checks to see if the .csv file exists, if not, creates one and writes the header to it if not os.path.isfile('../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_logs.csv'): file = open( '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_logs.csv', 'x') file.close() file = open( '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_logs.csv', 'w') file.write( "index,zone#,x_zone,y_zone,point1,point2,point_calibration,date,frequency,angle,distance,folder,experiment,instance_one,instance_two,extra measurement,frame amount\n" ) file.close() # Writes the values specified in 'data' to the .csv with open( '../images/saved/' + v.FOLDER_NAME + '/' + v.EXPERIMENT_NAME + '_logs.csv', 'a') as f: writer = csv.writer(f) writer.writerows([data]) # Returns the picture initiation variables back to False to make sure it doesn't continue taking photos v.take_pic = False v.point_adjust = False 
v.gui_display = True
def detect_and_cover(self, image_path=None, fname=None, save_path='', is_video=False, orig_video_folder=None, force_jpg=False, is_mosaic=False, dilation=0): assert image_path assert fname # replace these with something better? if is_video: # Video capture video_path = image_path vcapture = VideoCapture(video_path) width = int(vcapture.get(CAP_PROP_FRAME_WIDTH)) height = int(vcapture.get(CAP_PROP_FRAME_HEIGHT)) fps = vcapture.get(CAP_PROP_FPS) # Define codec and create video writer, video output is purely for debugging and educational purpose. Not used in decensoring. file_name = fname + "_with_censor_masks.mp4" vwriter = VideoWriter(file_name, VideoWriter_fourcc(*'mp4v'), fps, (width, height)) count = 0 success = True print("Video read complete, starting video detection:") while success: print("frame: ", count) # Read next image success, image = vcapture.read() if success: # OpenCV returns images as BGR, convert to RGB image = image[..., ::-1] # save frame into decensor input original. Need to keep names persistent. im_name = fname[: -4] # if we get this far, we definitely have a .mp4. Remove that, add count and .png ending file_name = orig_video_folder + im_name + str(count).zfill( 6 ) + '.png' # NOTE Should be adequite for having 10^6 frames, which is more than enough for even 30 mintues total. # print('saving frame as ', file_name) skimage.io.imsave(file_name, image) # Detect objects r = self.model.detect([image], verbose=0)[0] # Remove unwanted class, code from https://github.com/matterport/Mask_RCNN/issues/1666 remove_indices = np.where( r['class_ids'] != 2) # remove bars: class 1 new_masks = np.delete(r['masks'], remove_indices, axis=2) # Apply cover cov, mask = self.apply_cover(image, new_masks, dilation) # save covered frame into input for decensoring path file_name = save_path + im_name + str(count).zfill( 6) + '.png' # print('saving covered frame as ', file_name) skimage.io.imsave(file_name, cov) # RGB -> BGR to save image to video cov = cov[..., ::-1] # Add image to video writer vwriter.write(cov) count += 1 vwriter.release() print('video complete') else: # Run on Image try: image = skimage.io.imread( image_path) # problems with strange shapes if image.ndim != 3: image = skimage.color.gray2rgb( image) # convert to rgb if greyscale if image.shape[-1] == 4: image = image[..., :3] # strip alpha channel except: print( "ERROR in detect_and_cover: Image read. Skipping. image_path=", image_path) return # Detect objects # image_ced =Canny(image=image, threshold1=10, threshold2=42) # image_ced = 255 - image_ced # image_ced = cvtColor(image_ced,COLOR_GRAY2RGB) # skimage.io.imsave(save_path + fname[:-4] + '_ced' + '.png', image_ced) try: # r = self.model.detect([image_ced], verbose=0)[0] r = self.model.detect([image], verbose=0)[0] except Exception as e: print("ERROR in detect_and_cover: Model detection.", e) return # Remove unwanted class, code from https://github.com/matterport/Mask_RCNN/issues/1666 if is_mosaic == True or is_video == True: remove_indices = np.where( r['class_ids'] != 2) # remove bars: class 2 else: remove_indices = np.where( r['class_ids'] != 1) # remove mosaic: class 1 new_masks = np.delete(r['masks'], remove_indices, axis=2) # except: # print("ERROR in detect_and_cover: Model detect") cov, mask = self.apply_cover(image, new_masks, dilation) try: # Save output, now force save as png file_name = save_path + fname[:-4] + '.png' skimage.io.imsave(file_name, cov) except: print( "ERROR in detect_and_cover: Image write. Skipping. image_path=", image_path)
def write_video(frames, fps, filename):
    """Write a sequence of BGR frames sized (hp.img_width, hp.img_height) to an mp4 file."""
    fourcc = VideoWriter_fourcc(*'mp4v')
    video = VideoWriter(filename, fourcc, fps, (hp.img_width, hp.img_height))
    for frame in frames:
        video.write(frame)
    video.release()
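# --- Hedged usage sketch (not in the original source) ---
# A minimal way to exercise write_video above, assuming the surrounding module
# exposes an `hp` hyper-parameter object with img_width / img_height (the function
# already references it). The synthetic grey-ramp frames and the output file name
# are illustrative only.
import numpy as np

demo_frames = [
    np.full((hp.img_height, hp.img_width, 3), (8 * i) % 256, dtype=np.uint8)
    for i in range(60)
]
write_video(demo_frames, fps=30, filename='write_video_demo.mp4')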
                        SIMULATION_TIME)
# advance facility and simulation time
SIMULATION_TIME = SIMULATION_TIME + ONE_SECOND
TEST_FACILITY.advance_time_facility()

# output queue summary
TEST_FACILITY.export_queue_summary_to_csv()

# create video file
WIDTH = 640
HEIGHT = 480
FPS = 30
SECONDS = SECONDS_IN_DAY / 30
FOURCC = VideoWriter_fourcc(*'MP42')
VIDEO = VideoWriter('test.avi', FOURCC, float(FPS), (WIDTH, HEIGHT))

IMG_FILES = os.listdir()
for i in IMG_FILES:
    print(i)
    if 'png' not in i:
        # skip non-image files in the working directory; cv2.imread would return None
        continue
    img = cv2.imread(i)
    VIDEO.write(img)
VIDEO.release()

# remove img files
IMG_FILES = os.listdir()
for i in IMG_FILES:
    if 'png' in i:
        os.remove(i)

print('Runtime: ' + str(datetime.datetime.now() - SCRIPT_RUNTIME_START))
import cv2 as cv
import numpy as np
from cv2 import VideoWriter, VideoWriter_fourcc

fourcc = VideoWriter_fourcc(*'XVID')
cap = cv.VideoCapture('images/HW.avi')
ret, first_frame = cap.read()
average_value = np.float32(first_frame)
out = VideoWriter('output.avi', fourcc, 20.0,
                  (first_frame.shape[1], first_frame.shape[0]))

while cap.isOpened():
    ret, frame = cap.read()
    if frame is None:
        break
    # Running average of the scene; pixels that differ from it are treated as motion.
    cv.accumulateWeighted(frame, average_value, 0.05)
    resulting_avg = cv.convertScaleAbs(average_value)
    resulting_avg_gray = cv.cvtColor(resulting_avg, cv.COLOR_BGR2GRAY)
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    difference = cv.absdiff(resulting_avg_gray, frame_gray)
    _, difference = cv.threshold(difference, 25, 255, cv.THRESH_BINARY)
    cv.imshow('Difference', difference)
    frame[np.where(difference == 255)] = (0, 0, 255)
    out.write(frame)
    if ret:
        cv.imshow('Frame', frame)
        if cv.waitKey(20) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv.destroyAllWindows()
def VideoArrayWrite(VideoArray,output_folder,**kwargs): if not os.path.exists(output_folder): try : os.makedirs(output_folder) except FileExistsError: pass if "alerts" in kwargs: alerts = kwargs.get("alerts") else: alerts = True if "output_name" in kwargs: output_name = kwargs.get("output_name") else: #ERROR 2 FLAG: LOOK FOR REASON HERE : BEGINING if "input_path" in kwargs: input_path = kwargs.get("input_path") path,file = os.path.split(input_path) if file.endswith(".seq") and path != "": output_name = os.path.basename(path) else: if path == "" or path == None: output_name = file # sys.exit("ERROR 2 INVALID_PATH : You must either specify a filename with : output_name = ""Nameofyourfile"" (better practice is doing it iteratively) or the path input to get the seq file, used in HirisSeqReader, with input_path = ""pathtoyourvideo"" ") else : if "\\" not in path: output_name = path else : output_name = os.path.basename(path) else : #ERROR 2 FLAG : LOOK FOR REASON HERE : END sys.exit("ERROR 2 FILE_NOT_FOUND : You must either specify a filename with : output_name = ""Nameofyourfile"" (better practice is doing it iteratively) or the path input to get the seq file, used in HirisSeqReader, with input_path = ""pathtoyourvideo"" ") print(output_name) if "extension" in kwargs: extension = kwargs.get("extension") else: if alerts : print(colored("Using default extension (.avi) as none was specified","blue")) extension = ".avi" if "fps" in kwargs: fps = kwargs.get("fps") else: fps = 30 if alerts : print(colored("Using default framerate (30 fps) as none was specified","blue")) if "codec" in kwargs: codec = kwargs.get("codec") else: codec = "MJPG" if alerts : print(colored("Using default codec (MJPG) as none was specified","blue")) if "color" in kwargs: color = kwargs.get("color") else: color = False if alerts : print(colored("Interpreting data as greyscale images as no color info was specified","blue")) FullOutputPathname = os.path.join(output_folder,output_name+extension) size = np.size(VideoArray,1) , np.size(VideoArray,0) fourcc = VideoWriter_fourcc(*codec) vid = VideoWriter(FullOutputPathname, fourcc, fps, size, color) bar = pyprind.ProgBar(int(np.size(VideoArray,2)),bar_char='▓') for ImageIndex in range(np.size(VideoArray,2)): vid.write(np.uint8(VideoArray[:,:,ImageIndex])) # print(ImageIndex) bar.update() vid.release() del bar del VideoArray print("Video compression and writing sucessfull\n") gc.collect()
def create_video(full_data_file=None): if full_data_file is None: data_file = '../saved_data/fla/fla_model_outdoor_A_sym_01-21-2020-15-45-02.pt' checkpoint = torch.load(data_file) args = checkpoint['args'] print(args) device = torch.device('cuda:0') if args.cuda else torch.device('cpu') tensor_type = torch.double if args.double else torch.float if args.megalith: dataset_dir = '/media/datasets/' else: dataset_dir = '/media/m2-drive/datasets/' image_dir = dataset_dir + 'fla/2020.01.14_rss2020_data/2017_05_10_10_18_40_fla-19/flea3' pose_dir = dataset_dir + 'fla/2020.01.14_rss2020_data/2017_05_10_10_18_40_fla-19/pose' normalize = transforms.Normalize(mean=[0.45], std=[0.25]) transform = transforms.Compose([ torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224), transforms.ToTensor(), normalize, ]) dim_in = 2 train_dataset = '../experiments/FLA/{}_train.csv'.format(args.scene) train_loader = DataLoader(FLADataset(train_dataset, image_dir=image_dir, pose_dir=pose_dir, transform=transform), batch_size=args.batch_size_train, pin_memory=False, shuffle=True, num_workers=args.num_workers, drop_last=False) valid_dataset = FLADataset( '../experiments/FLA/all_moving_unshuffled.csv', image_dir=image_dir, pose_dir=pose_dir, transform=transform) #valid_dataset = torch.utils.data.ConcatDataset([valid_dataset1, valid_dataset2, valid_dataset3]) #test_dataset = FLADataset('FLA/{}_test.csv'.format(args.scene), image_dir=image_dir, pose_dir=pose_dir, transform=transform) valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size_test, pin_memory=True, shuffle=False, num_workers=args.num_workers, drop_last=False) model = QuatFlowNet(enforce_psd=args.enforce_psd, unit_frob_norm=args.unit_frob, dim_in=dim_in, batchnorm=args.batchnorm).to(device=device, dtype=tensor_type) model.load_state_dict(checkpoint['model'], strict=False) A_predt, q_estt, q_targett = evaluate_A_model(train_loader, model, device, tensor_type) A_pred, q_est, q_target = evaluate_A_model(valid_loader, model, device, tensor_type) data = ((A_predt, q_estt, q_targett), (A_pred, q_est, q_target)) desc = data_file.split('/')[-1].split('.pt')[0] saved_data_file_name = 'processed_video_{}.pt'.format(desc) full_data_file = '../saved_data/fla/{}'.format(saved_data_file_name) torch.save({'file_fla': data_file, 'data_fla': data}, full_data_file) print('Saved data to {}.'.format(full_data_file)) else: data = torch.load(full_data_file) quantile = 0.75 uncertainty_metric_fn = sum_bingham_dispersion_coeff (A_train, _, _), (A_test, q_est, q_target) = data['data_fla'] thresh = compute_threshold(A_train.numpy(), uncertainty_metric_fn=uncertainty_metric_fn, quantile=quantile) mask = compute_mask(A_test.numpy(), uncertainty_metric_fn, thresh) transform = transforms.ToTensor() dataset_dir = '/Users/valentinp/Dropbox/2020.01.14_rss2020_data/2017_05_10_10_18_40_fla-19/' image_dir = dataset_dir + 'flea3' pose_dir = dataset_dir + 'pose' all_dataset = FLADataset('../FLA/all_moving_unshuffled.csv', image_dir=image_dir, pose_dir=pose_dir, transform=transform) fourcc = VideoWriter_fourcc(*'MP4V') FPS = 60 width = 640 height = 512 video_array = np.empty((len(all_dataset), height, width, 3)) for i in range(len(all_dataset)): imgs, _ = all_dataset[i] img = imgs[0].numpy().reshape(height, width, 1) * 255 img = img.repeat(3, axis=2).astype(np.uint8) if mask[i] == 0: img[:100, :100, 0] = 255 img[:100, :100, 1] = 0 img[:100, :100, 2] = 0 else: img[:100, :100, 0] = 0 img[:100, :100, 1] = 255 img[:100, :100, 2] = 0 video_array[i] = img if i % 1000 == 0: 
            print(i)

    torchvision.io.video.write_video('fla.mp4', video_array, FPS,
                                     video_codec='mpeg4', options=None)
    def run(self, filename='AttentionMap'):
        fourcc = VideoWriter_fourcc(*'mp4v')
        rgb_frame = self.env.render("rgb_array")
        real_shape = (int(rgb_frame.shape[1]), int(rgb_frame.shape[0]))
        video = VideoWriter("./{}.mp4".format(filename), fourcc, float(self.FPS), real_shape)

        for _ in range(self.FPS * self.seconds):
            rgb_frame = self.env.render("rgb_array")
            rgb_frame = cv2.resize(rgb_frame, real_shape, interpolation=cv2.INTER_AREA)

            t_state = torch.cuda.FloatTensor(self.state)
            dist, self.hidden, value = self.nnet.forward(t_state, self.hidden)
            self.hidden = (self.hidden[0].detach(), self.hidden[1].detach())
            dist = self.nnet.wrap_dist(dist)
            action = dist.sample().detach()
            log_prob = dist.log_prob(action)
            loss = (torch.exp(log_prob) * 0.1).mean()
            loss.backward()

            # Grad-CAM style saliency: weight the last activation map by its gradients.
            self.output = self.outputs[-1].detach().cpu().numpy()
            self.gradient = self.gradients[-1].detach().cpu().numpy()[0]
            cam = numpy.zeros(self.output.shape[1:], dtype=numpy.float32)
            for i, w in enumerate(self.gradient):
                cam += w * self.output[i, :, :]
            cam = cam - numpy.mean(cam)
            cam = numpy.maximum(cam, 0)
            cam = cv2.resize(cam, real_shape)
            cam = cam - numpy.min(cam)
            cam = cam / numpy.max(cam)
            cam = cam * 255.
            cam = cam.astype(numpy.uint8)
            cam: numpy.ndarray = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
            cam[:, :, 0] -= numpy.sum(cam, axis=-1).min()
            transparency = 0.7
            cam = cam * transparency
            cam = cam.astype(numpy.uint8)
            cam = numpy.clip(cam, 0, 255)
            cam = cv2.resize(cam, real_shape, interpolation=cv2.INTER_AREA)

            # Blend the heatmap over the rendered frame and append it to the video.
            res = cv2.add(rgb_frame, cam)
            prep = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
            prep = cv2.resize(prep, real_shape, interpolation=cv2.INTER_AREA)
            video.write(prep)

            self.state, _, done, _ = self.env.step(dist.sample().cpu().numpy())
            if done.item():
                self.hidden = None
        video.release()
        resize = cv2.resize(image, (new_w, new_h))
        VideoArray[:, :, fc, frame] = resize[:, :, 0]
        success, image = vidcap.read()
        frame = frame + 1
    vidcap.release()
    fc = fc + 1
    bar.update()
    # #imgplot = plt.imshow(image,cmap='gray_r')
    #plt.show(imgplot)

codec = "HVEC"
size = new_w * ROWSIZE, new_h * ROWSIZE
fourcc = VideoWriter_fourcc(*codec)
vid = VideoWriter(OutputFile[ROLL], fourcc, 30, size, False)

for I in range(length):
    print(I)
    column = 0
    row = 0
    # VignetteBuffer = np.full([new_h * ROWSIZE, new_w * ROWSIZE,3],0)
    VignetteBuffer = np.full([new_h * ROWSIZE, new_w * ROWSIZE], 0)
    for J in range(NBfiles):
        # print(I, J, column*new_h , (column+1)*new_h, row*new_w, (row+1)*new_w, "FULLSIZE : ", new_h * ROWSIZE, new_w * ROWSIZE)
        # VignetteBuffer[column*new_h:(column+1)*new_h , row*new_w : (row+1)*new_w , 0] = VideoArray[:,:,J,I]
        VignetteBuffer[column * new_h:(column + 1) * new_h,
                       row * new_w:(row + 1) * new_w] = VideoArray[:, :, J, I]
        # VignetteBuffer[:,:,1]=VignetteBuffer[:,:,0]
def detect_objects_in_video(video_path, model, labels_to_names, video_output_name, output="video", fps=30, frames=None, yolbo=False): """ Perform object detection on video data :param video_path: file path to video (str) :param model: pretrained RetinaNet model :param labels_to_names: dictionary mapping integer labels to string names :param video_output_name: output video file (str) to be written :param output: either 'video' for annotated video or 'frames' for specific annotated frames :param fps: frames per second of video :param frames: list of integers representing the annotated frames to write (only if output='frames') :param yolbo: True to run YOLBO (defaults to False) :return: annotated video file (output='video'), or annotated image files (output='frames') """ cap = VideoCapture(video_path) if not cap.isOpened(): return print("Error opening video file") if output == "frames": assert frames is not None, 'Provide frame numbers to return' assert type(labels_to_names ) == dict, 'labels_to_names parameter should be of type: dict' frame_width = int(cap.get(3)) frame_height = int(cap.get(4)) out = VideoWriter(video_output_name, VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (frame_width, frame_height)) step = 1 detection_matrix = None while cap.isOpened(): ret, frame = cap.read() if ret: if yolbo: draw, detection_matrix = run_retinanet(model, frame, step, frame_height, frame_width, labels_to_names, yolbo, detection_matrix) else: draw = run_retinanet(model, frame, step, frame_height, frame_width, labels_to_names, yolbo, None) if output == 'video': out.write(draw) if output == 'frames': if step in frames: imwrite('frame' + str(step) + '.jpg', draw) step += 1 if waitKey(1) & 0xFF == ord('q'): break else: break cap.release() out.release() destroyAllWindows()
import cv2
import numpy as np                       # for mathematical operations
from skimage.transform import resize     # for resizing images
from cv2 import VideoWriter, VideoWriter_fourcc

count = 0
f = open("trainlist.txt", "r")
videoFile = f.readline()
videoFile = videoFile.rstrip("\n")
print(videoFile)

# cap = cv2.VideoCapture("./Dataset_Samples/Normal_Videos004_x264.mp4")
# capturing the video from the given path
cap = cv2.VideoCapture(videoFile)
frameRate = cap.get(5)  # frame rate
x = 1
Y = []  # empty list to store all the segments of videos generated (final output!)
X = []  # empty list to store the individual segments of videos
fourcc = VideoWriter_fourcc(*'MP42')  # some code required for VideoWriter
video = VideoWriter('./segment0.avi', fourcc, float(24), (128, 128))  # creates video to store 1st segment

while cap.isOpened():
    frameId = cap.get(1)  # current frame number
    ret, frame = cap.read()
    # break when video ends (check before touching the frame, which is None at that point)
    if ret != True:
        break
    a = resize(frame, preserve_range=True, output_shape=(128, 128)).astype(np.uint8)  # reshape frame to 128x128x3
    video.write(a)  # write frame to generated video
    if frameId % 16 == 0 and frameId > 0:
        X1 = np.stack(X)  # convert X from list to numpy array
        Y.append(X1)
        X = []
        video = VideoWriter('./segment%d.avi' % (frameId / 16), fourcc, float(24), (128, 128))  # create new video
    X.append(a)
from moviepy.editor import *
from cv2 import VideoWriter, VideoWriter_fourcc
from image import ImageHelper
import cv2

if __name__ == "__main__":
    clip = VideoFileClip("ProducePandas - Lalala.mp4")
    subClip = clip
    print(clip.size)
    frames = subClip.iter_frames()
    counter = 0
    video = VideoWriter("Tiny.mp4", VideoWriter_fourcc(*'mp4v'), 25,
                        (clip.size[0], clip.size[1]))
    for frame in frames:
        counter += 1
        print(counter, end=' ')
        cv2Frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        f = ImageHelper.create_frame(cv2Frame)
        # print(f.shape)
        video.write(f)
        # cv2.imwrite("Test"+str(counter)+".png", f)
    video.release()
def Seq_to_Video(seq_path,output_folder,**kwargs): """ Lecture du fichier binaire de séquence sqb Les données sont représentées par la structure en C suivante : typedef struct { long offset; // 4 bits -> + 4 bits vides car mémoire alignée double TimeStamp; // 8 bits int binfile; // 4 bits -> + 4 bits vides car mémoire alignée } IMGDATA; """ logger = logging.getLogger("Seq_to_Video") logger.setLevel(logging.INFO) if not os.path.exists(seq_path): print(colored("INFO : File do not exist, in folder : {}".format(seq_path),"red")) logger.error("INFO : File do not exist, in folder : {}".format(seq_path)) return False, "input file do not exist" logger.info("Opening file {}".format(seq_path)) if "alerts" in kwargs: alerts = kwargs.get("alerts") else: alerts = True if "output_name" in kwargs: output_name = kwargs.get("output_name") else : input_path = seq_path path,file = os.path.split(input_path) if file.endswith(".seq") and path != "": output_name = os.path.basename(path) else: if path == "" or path == None: output_name = file # sys.exit("ERROR 2 INVALID_PATH : You must either specify a filename with : output_name = ""Nameofyourfile"" (better practice is doing it iteratively) or the path input to get the seq file, used in HirisSeqReader, with input_path = ""pathtoyourvideo"" ") else : if "\\" not in path: output_name = path else : output_name = os.path.basename(path) if "extension" in kwargs: extension = kwargs.get("extension") else: if alerts : print(colored("Using default extension (.avi) as none was specified","blue")) logger.debug("Using default extension (.avi) as none was specified") extension = ".avi" if "fps" in kwargs: fps = kwargs.get("fps") else: fps = 30 if alerts : print(colored("Using default framerate (30 fps) as none was specified","blue")) logger.debug("Using default framerate (30 fps) as none was specified") if "codec" in kwargs: codec = kwargs.get("codec") else: codec = "MJPG" if alerts : print(colored("Using default codec (MJPG) as none was specified","blue")) logger.debug("Using default codec (MJPG) as none was specified") if "color" in kwargs: color = kwargs.get("color") else: color = False if alerts : print(colored("Interpreting data as greyscale images as no color info was specified","blue")) logger.debug("Interpreting data as greyscale images as no color info was specified") FullOutputPathname = os.path.join(output_folder,output_name+extension) logger.debug(output_folder) logger.debug(output_name) logger.debug(FullOutputPathname) if os.path.exists(FullOutputPathname): print("Video {} Already Exist, searching next".format(output_name+".avi")) logger.info("File {} already exist, skipping".format(FullOutputPathname)) return False, "output file already exist" cfg = configparser.ConfigParser() cfg.read(seq_path) try: width = int(cfg.get('Sequence Settings', 'Width')) height = int(cfg.get('Sequence Settings', 'Height')) bpp = int(cfg.get('Sequence Settings', 'BytesPerPixel')) num_images = cfg.get('Sequence Settings', 'Number of files') bin_file = cfg.get('Sequence Settings', 'Bin File') sqb_path = seq_path.replace('.seq', '.sqb') except Exception as e: print(colored("Error : {} on file : {}".format(e,seq_path),"red")) logger.error("Error : {} on file : {}".format(e,seq_path)) return False, "seq config read" pathstr = os.path.dirname(seq_path) if height < 10 or width < 10 : logger.error("Error on file : {}".format(seq_path) + "Width or Heidth not compliant (<10)") return False, "Dimension" if int(num_images) < 10 : #for files in os.path.dirname(seq_path) : #QuickRegexp(files) #if 
True: # pass#ADD CODE HERE TO TEST IF FILE IS CORRUPTED OR SIMPLY END OF A SESSION logger.error("Error on file : {}".format(seq_path) + "Number of frames not compliant (<10)") return False, "Frames" if not os.path.exists(output_folder): try : os.makedirs(output_folder) except FileExistsError: pass size = width , height fourcc = VideoWriter_fourcc(*codec) vid = VideoWriter(FullOutputPathname, fourcc, fps, size, color) # VideoArray = np.empty([height,width,int(num_images)]) print("Processing Sequence : {}".format(seq_path)) print("Video format : {} x {}".format(height,width)) print(colored("Writing to {}".format(FullOutputPathname),"green")) bar = pyprind.ProgBar(int(num_images),bar_char='░') with open(sqb_path,'rb') as f : try : for i in range(0, int(num_images)): offset = struct.unpack('l', f.read(4)) #This variables are unused but file has to be read in a specific order to acess the valuable data # padding = f.read(4) # timestamp = struct.unpack('d', f.read(8)) f.read(4) struct.unpack('d', f.read(8)) #End of unused variables binfile = struct.unpack('i', f.read(4)) #This variables are unused but file has to be read in a specific order to acess the valuable data # padding = f.read(4) f.read(4) #End of unused variables # print(offset) bin_path = "%s\\%s%0.5d.bin" % (pathstr, bin_file, binfile[0]) # tiff_file_path = "%s_%0.5d.tif" %(tiff_path, i) f_bin = open(bin_path, 'rb') f_bin.seek(offset[0], os.SEEK_SET) bytes = f_bin.read(height*width*bpp) if bpp == 2: buffer = np.frombuffer(bytes, dtype=np.uint16) else: buffer = np.frombuffer(bytes, dtype=np.uint8) nparr2 = buffer.reshape(height, width) # cv2.imwrite(tiff_file_path, nparr2) f_bin.close() # imgplot = plt.imshow(nparr2,cmap='gray_r') # plt.show(imgplot) # print(np.shape(nparr2)) # input() # VideoArray[:,:,i] = nparr2 vid.write(np.uint8(nparr2)) bar.update() # for ImageIndex in range(np.size(VideoArray,2)): # print(ImageIndex) vid.release() except Exception as e: print(colored("Error : {} on file : {}".format(e,seq_path),"red")) logger.error("Error : {} on file : {}".format(e,seq_path)) return False, "binary file I/O" del bar del cfg # del VideoArray gc.collect() print() print("Video compression {} sucessfull".format(seq_path)) logger.info("Video compression {} sucessfull".format(seq_path)) return True, "none"
def gen_video(imgs, size=(1920, 1080), filename='test.mp4'): FPS = 10 seconds = 30 fourcc = VideoWriter_fourcc(*'avc1') video = VideoWriter(filename, fourcc, float(FPS), size) frame = np.random.randint(220, 221, (size[1], size[0], 3), dtype=np.uint8) add_text(frame, 'begining') repeat(video.write, FPS * seconds, frame) for img in imgs: info = img[0].shape x = (size[1] - info[0]) // 2 y = (size[0] - info[1]) // 2 frame = np.random.randint(220, 221, (size[1], size[0], 3), dtype=np.uint8) try: frame[x:(info[0] + x), y:(info[1] + y)] = img[0] add_text(frame, img[1]) repeat(video.write, FPS * seconds, frame) except: print("failed to porcess ") frame = cv2.resize(img[0], (1080, 1920)) add_text(frame, img[1]) repeat(video.write, FPS * seconds, frame) mulface = cv2.imread('../test/res/mulperson.jpg') info = mulface.shape x = (size[1] - info[0])//2 y = (size[0] - info[1])//2 x1 = size[1] - info[0] y1 = size[0] - info[1] temp = [(0, 0), (0, y), (0, y1), (x, 0), (x, y), (x, y1), (x1, 0), (x1, y), (x1, y1)] for i in temp: # many people frame = np.random.randint(220, 221, (size[1], size[0], 3), dtype=np.uint8) mulface = cv2.imread('res/mulperson.jpg') info = mulface.shape print(i) print(info) print(str(i[0]) + ":" + str(info[0] + i[0]) + "\n") print(str(i[1]) + ":" + str(info[1] + i[1]) + "\n") frame[i[0]:(info[0] + i[0]), i[1]:(info[1] + i[1])] = mulface repeat(video.write, FPS * seconds, frame) # moving img = cv2.imread('res/test.jpg') info = img.shape for i in range(0, 1080, 10): frame = np.random.randint(220, 221, (size[1], size[0], 3), dtype=np.uint8) try: frame[i:i+info[0],0:0+info[1]]=img add_text(frame, 'moving') video.write(frame) except: pass for i in range(0, 1920, 10): try: frame = np.random.randint(220, 221, (size[1], size[0], 3), dtype=np.uint8) frame[0:info[0],i:i+info[1]]=img add_text(frame, 'moving') video.write(frame) except: pass src = walk_dir("./src/") for imgfile in src: img = cv2.imread(imgfile) info = img.shape x = (size[1] - info[0]) // 2 y = (size[0] - info[1]) // 2 frame = np.random.randint(220, 221, (size[1], size[0], 3), dtype=np.uint8) try: frame[x:(info[0] + x), y:(info[1] + y)] = img repeat(video.write, FPS * seconds, frame) except: print("failed to porcess ") frame = cv2.resize(img, (1080, 1920)) repeat(video.write, FPS * seconds, frame) video.release()
def Repair_HIRIS(repairfolder,damagedVideopath,outputfolder,**kwargs): logger = logging.getLogger("Repair_HIRIS") logger.setLevel(logging.INFO) if "expectedFrames" in kwargs: expectedFrames = kwargs.get("expectedFrames") else: expectedFrames = 500 if "alerts" in kwargs: alerts = kwargs.get("alerts") else: alerts = True if "output_name" in kwargs: output_name = kwargs.get("output_name") else : output_name = os.path.basename(os.path.dirname(damagedVideopath)) if "extension" in kwargs: extension = kwargs.get("extension") else: extension = ".avi" if alerts : print(colored("Using default extension (.avi) as none was specified","blue")) logger.debug("Using default extension (.avi) as none was specified") if "fps" in kwargs: fps = kwargs.get("fps") else: fps = 30 if alerts : print(colored("Using default framerate (30 fps) as none was specified","blue")) logger.debug("Using default framerate (30 fps) as none was specified") if "codec" in kwargs: codec = kwargs.get("codec") else: codec = "MJPG" if alerts : print(colored("Using default codec (MJPG) as none was specified","blue")) logger.debug("Using default codec (MJPG) as none was specified") if "color" in kwargs: color = kwargs.get("color") else: color = False if alerts : print(colored("Interpreting data as greyscale images as no color info was specified","blue")) logger.debug("Interpreting data as greyscale images as no color info was specified") FullOutputvideo = os.path.join(outputfolder,output_name+extension) if os.path.exists(FullOutputvideo): return False, "out video exists" cfg = configparser.ConfigParser() cfg.read(damagedVideopath) try: width = int(cfg.get('Sequence Settings', 'Width')) height = int(cfg.get('Sequence Settings', 'Height')) bpp = int(cfg.get('Sequence Settings', 'BytesPerPixel')) RepairSubFolder = str(width)+"x"+str(height)+"-"+str(expectedFrames) TrialName = os.path.basename(damagedVideopath) sqb_Name = TrialName.replace('.seq', '.sqb') TrialName = TrialName[0:-4] sqb_path = os.path.join(repairfolder,RepairSubFolder,sqb_Name) pathstr = os.path.dirname(damagedVideopath) except Exception as e: print(colored("Error : {} on file : {}".format(e,damagedVideopath),"red")) logger.error("Error : {} on file : {}".format(e,damagedVideopath)) return False, "seq config read" ListDAMAGED_BINs = BinarySearch(pathstr,".bin") ListCORRECTER_BINs = BinarySearch(os.path.join(repairfolder,RepairSubFolder),".bin") try : ListDAMAGED_BINs = AlphaNum_Sort(ListDAMAGED_BINs) ListCORRECTER_BINs = AlphaNum_Sort(ListCORRECTER_BINs) except Exception as e : print(colored("Error : {} on file : {}".format(e,damagedVideopath),"red")) logger.error("Error : {} on file : {}".format(e,damagedVideopath)) return False, "sorting failed" if len(ListDAMAGED_BINs) != len(ListCORRECTER_BINs): print(colored("Insufficient nb of binaries for file : {}".format(damagedVideopath),"red")) logger.error("Insufficient nb of binaries for file : {}".format(damagedVideopath)) return False, "insufficient binary files" try: size = width , height fourcc = VideoWriter_fourcc(*codec) vid = VideoWriter(FullOutputvideo, fourcc, fps, size, color) except Exception as e: print(colored("Error : {} on file : {}".format(e,FullOutputvideo),"red")) logger.error("Error : {} on file : {}".format(e,FullOutputvideo)) return False, "videowirte open fail" print("Repairing Sequence : {}".format(damagedVideopath)) print("Video format : {} x {}".format(height,width)) print(colored("Writing to {}".format(FullOutputvideo),"green")) bar = pyprind.ProgBar(int(expectedFrames),bar_char='░') with 
open(sqb_path,'rb') as f : try : for i in range(0, int(expectedFrames)): offset = struct.unpack('l', f.read(4)) #This variables are unused but file has to be read in a specific order to acess the valuable data # padding = f.read(4) # timestamp = struct.unpack('d', f.read(8)) f.read(4) struct.unpack('d', f.read(8)) #End of unused variables binfile = struct.unpack('i', f.read(4)) #This variables are unused but file has to be read in a specific order to acess the valuable data # padding = f.read(4) f.read(4) #End of unused variables bin_number = "_%0.5d.bin" % (binfile[0]) Index = ListCORRECTER_BINs.index(TrialName+bin_number) # print(offset) bin_path = os.path.join(pathstr,ListDAMAGED_BINs[Index]) # tiff_file_path = "%s_%0.5d.tif" %(tiff_path, i) f_bin = open(bin_path, 'rb') f_bin.seek(offset[0], os.SEEK_SET) bytes = f_bin.read(height*width*bpp) if bpp == 2: buffer = np.frombuffer(bytes, dtype=np.uint16) else: buffer = np.frombuffer(bytes, dtype=np.uint8) nparr2 = buffer.reshape(height, width) # cv2.imwrite(tiff_file_path, nparr2) f_bin.close() # imgplot = plt.imshow(nparr2,cmap='gray_r') # plt.show(imgplot) # print(np.shape(nparr2)) # input() # VideoArray[:,:,i] = nparr2 vid.write(np.uint8(nparr2)) bar.update() # for ImageIndex in range(np.size(VideoArray,2)): # print(ImageIndex) except Exception as e: vid.release() print(colored("Error : {} on file : {}".format(e,damagedVideopath),"red")) logger.error("Error : {} on file : {}".format(e,damagedVideopath)) return False, "binary file I/O" vid.release() del bar del cfg # del VideoArray gc.collect() print() print("Video compression {} sucessfull".format(damagedVideopath)) logger.info("Video compression {} sucessfull".format(damagedVideopath)) return True, "none"
from pyimagesearch.notifications import TwilioNotifier
from pyimagesearch.utils import Conf
from imutils.video import VideoStream
from imutils.io import TempFile
from datetime import datetime
from datetime import date
import numpy as np
import argparse
import imutils
import signal
import time
import cv2
from cv2 import VideoWriter_fourcc
import sys

fourcc = VideoWriter_fourcc(*'avc1')


# function to handle keyboard interrupt
def signal_handler(sig, frame):
    print("[INFO] You pressed 'ctrl + c'! Closing refrigerator monitor"
          " application...")
    sys.exit(0)


# construct the argument parser and parse the args
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="Path to the input configuration file")
def Compress_Tiffvideo(TiffFiles,OutputVideo, ** kwargs): logger = logging.getLogger("Compress_Tiffvideo") logger.setLevel(logging.INFO) TiffFiles = AlphaNum_Sort(TiffFiles) print("Treating video : {} at {}".format(os.path.basename(OutputVideo),os.path.dirname(OutputVideo))) if "alerts" in kwargs: alerts = kwargs.get("alerts") else: alerts = True if "fps" in kwargs: fps = kwargs.get("fps") else: fps = 30 if alerts : print(colored("Using default framerate (30 fps) as none was specified","blue")) logger.debug("Using default framerate (30 fps) as none was specified") if "codec" in kwargs: codec = kwargs.get("codec") else: codec = "MJPG" if alerts : print(colored("Using default codec (MJPG) as none was specified","blue")) logger.debug("Using default codec (MJPG) as none was specified") if "color" in kwargs: color = kwargs.get("color") else: color = False if alerts : print(colored("Interpreting data as greyscale images as no color info was specified","blue")) logger.debug("Interpreting data as greyscale images as no color info was specified") bar = pyprind.ProgBar(len(TiffFiles),bar_char='░') print("Processing a {} frames video".format(len(TiffFiles))) Index = 0 bar.update() for File in TiffFiles: image = imread(File, 0) if Index == 0: Index = 1 SIZE = np.shape(image) size = SIZE[1] , SIZE[0] fourcc = VideoWriter_fourcc(*codec) vid = VideoWriter(OutputVideo, fourcc, fps, size, color) vid.write(np.uint8(image)) else : vid.write(np.uint8(image)) bar.update() try : del bar vid.release() return True except Exception as e: print(colored("Error Compress_Tiffvideo 2: {} on file : {}".format(e,OutputVideo),"red")) logger.error("Error Compress_Tiffvideo 2: {} on file : {}".format(e,OutputVideo)) return False
def _btnTransfer(self): if self.content is None or self.style is None or not self.seted: return num_iter = int(self.entry_iter.get()) if self.preserv_color.get(): content_yuv = cvtColor(self.content.numpy().transpose((1, 2, 0)), COLOR_RGB2YUV) if self.record.get(): fps = int(num_iter / VIDEO_SEC) writer = VideoWriter(TMP_VIDEO, VideoWriter_fourcc(*'MP4V'), fps, (self.output.shape[3], self.output.shape[2])) writer.write( np.array(self.insertSign(self.getOutputImage()))[:, :, ::-1]) output = self.output optimizer = self.optimizer net = self.net cw = float(self.entry_cw.get()) sw = float(self.entry_sw.get()) cwl = [ float(self.entry_cwls[ly].get()) for ly in list(self.entry_cwls.keys()) ] swl = [ float(self.entry_swls[ly].get()) for ly in list(self.entry_swls.keys()) ] label = self.loss_label # RUN START. st = time.time() itr = self.run + num_iter while self.run < itr: r = self.run def closure(): output.data.clamp_(0, 1) optimizer.zero_grad() net(output) style_loss = 0 content_loss = 0 for i, sl in enumerate(net.style_losses): style_loss += sl.loss * swl[i] for i, cl in enumerate(net.content_losses): content_loss += cl.loss * cwl[i] content_loss *= cw style_loss *= sw #loss_hist = hist_loss(output.squeeze(0).clamp(0, 1), self.content.div(255.).cuda()) loss = content_loss + style_loss # + loss_hist loss.backward() """res = output.clone().detach().cpu().data.clamp_(0, 1).squeeze(0).mul(255.) loadfn(canvas, tensor2Image(res)) canvas.update()""" label[ 'text'] = 'Run: {} | Style Loss : {:4f} Content Loss: {:4f}'.format( r, style_loss.item(), content_loss.item()) label.update() return loss loss = optimizer.step(closure) res = tensor2Image(output.clone().detach().cpu().data.clamp_( 0, 1).squeeze(0).mul(255.)) if self.preserv_color.get(): yuv = cvtColor(np.array(res).astype('float32'), COLOR_RGB2YUV) yuv[:, :, 1:3] = content_yuv[:, :, 1:3] res = Image.fromarray( np.clip(cvtColor(yuv, COLOR_YUV2RGB), 0, 255).astype(np.uint8)) if self.record.get(): writer.write(np.array(self.insertSign(res))[:, :, ::-1]) load_image2canvas(self.canvas_output.canvas, res) self.canvas_output.canvas.update() self.run += 1 elp = time.time() - st if self.record.get(): writer.release() # RUN FINISHED. self.loss_label['text'] = 'Done. ' + self.loss_label['text'] self.loss_label.update() print('run time: {} m : {} s'.format(elp // 60, elp % 60))
def main(): # test_file = "/run/user/1000/gvfs/smb-share:server=cossartlab.local,share=picardoteam/Behavior Camera/p5_20_02_17/cam 1" # print(f"is dir {os.path.isdir(test_file)}") # return open_avi_for_test = False if open_avi_for_test: test_avi() return subject_id = "p8_20_02_27" # P12_20_01_20 p8_20_01_16 cam_folder_id_1 = "cam2" # "cam2" cam_folder_id_2 = "a001" # a000 a001 if cam_folder_id_2 is None: cam_folder_id = "20190430_a002" # ex cam1_a002, movie1, etc... else: cam_folder_id = f"{cam_folder_id_1}_{cam_folder_id_2}" tiffs_path_dir = '/media/julien/My Book/robin_tmp/cameras/' tiffs_path_dir = '/media/julien/My Book/robin_tmp/cameras/to_convert/' # tiffs_path_dir = '/media/julien/My Book/robin_tmp/cameras/basler_recordings/' # tiffs_path_dir = '/media/julien/dream team/camera/' tiffs_path_dir = '/media/julien/Not_today/hne_not_today/data/behavior_movies/to_convert/' # On NAS # tiffs_path_dir = '/run/user/1000/gvfs/smb-share:server=cossartlab.local,share=picardoteam/Behavior Camera/' if cam_folder_id_2 is not None: tiffs_path_dir = os.path.join(tiffs_path_dir, subject_id, cam_folder_id_1, cam_folder_id_2) # tiffs_path_dir = os.path.join(tiffs_path_dir, subject_id, cam_folder_id_2, cam_folder_id_1) else: tiffs_path_dir = os.path.join(tiffs_path_dir, subject_id, cam_folder_id) # print(f"is dir {os.path.isdir(tiffs_path_dir)}") if cam_folder_id_1 is None: cam_id = "22983298" elif cam_folder_id_1 == "cam1": cam_id = "22983298" else: cam_id = "23109588" # cam1: 22983298 cam2: 23109588 # results_path = '/media/julien/My Book/robin_tmp/cameras/' # results_path = os.path.join(results_path, subject_id) results_path = "/media/julien/Not_today/hne_not_today/data/behavior_movies/converted_so_far/" files_in_dir = [ item for item in os.listdir(tiffs_path_dir) if os.path.isfile(os.path.join(tiffs_path_dir, item)) and ( item.endswith("tiff") or item.endswith("tif")) and ( not item.startswith(".")) ] # files_in_dir = sorted_tiff_ls(tiffs_path_dir) # print(f"len(files_in_dir) {len(files_in_dir)}") # for file_name in files_in_dir[-1000:]: # print(f"{file_name}") files_in_dir_dict = SortedDict() for file_name in files_in_dir: index_ = file_name[::-1].find("_") frame_number = int(file_name[-index_:-5]) files_in_dir_dict[frame_number] = file_name # print(f"{file_name[-index_:-5]}") # break # looking for a gap between frames last_tiff_frame = 0 error_detected = False for tiff_frame, tiff_file in files_in_dir_dict.items(): if tiff_frame - 1 != last_tiff_frame: print( f"Gap between frame n° {last_tiff_frame} and {tiff_frame}. 
File {tiff_file}" ) error_detected = True last_tiff_frame = tiff_frame if error_detected: raise Exception("ERROR: gap between 2 frames") # keep the name of the tiffs files yaml_file_name = os.path.join( results_path, f"behavior_{subject_id}_cam_{cam_id}_{cam_folder_id}.yaml") with open(yaml_file_name, 'w') as outfile: yaml.dump(list(files_in_dir_dict.values()), outfile, default_flow_style=False) # raise Exception("TEST YAML") # # leave only regular files, insert creation date # entries = ((stat[ST_CTIME], path) # for stat, path in entries if S_ISREG(stat[ST_MODE])) # # NOTE: on Windows `ST_CTIME` is a creation date # # but on Unix it could be something else # # NOTE: use `ST_MTIME` to sort by a modification date # # for cdate, path in sorted(entries): # print(time.ctime(cdate), os.path.basename(path)) # sort by alaphabatical order size_avi = None vid_avi = None fps_avi = 20 avi_file_name = os.path.join( results_path, f"behavior_{subject_id}_cam_{cam_id}_{cam_folder_id}_fps_{fps_avi}.avi" ) print( f"creating behavior_{subject_id}_cam_{cam_id}_{cam_folder_id}_fps_{fps_avi}.avi from {len(files_in_dir_dict)} tiff files" ) is_color = True # put fourcc to 0 for no compression # fourcc = 0 fourcc = VideoWriter_fourcc(*"XVID") # fourcc = VideoWriter_fourcc(*"MPEG") # https://stackoverflow.com/questions/44947505/how-to-make-a-movie-out-of-images-in-python start_time = time() for tiff_frame, tiff_file in files_in_dir_dict.items(): if (tiff_frame > 0) and (tiff_frame % 5000 == 0): print(f"{tiff_frame} frames done") # img = PIL.Image.open(os.path.join(tiffs_path_dir, tiff_file)) # img = np.array(img) if vid_avi is None: if size_avi is None: img = PIL.Image.open(os.path.join(tiffs_path_dir, tiff_file)) img = np.array(img) print(f"img.shape {img.shape}") size_avi = img.shape[1], img.shape[0] # vid_avi = VideoWriter(avi_file_name, fourcc, float(fps_avi), size_avi, is_color) vid_avi = VideoWriter(avi_file_name, fourcc, fps_avi, size_avi, is_color) # vid_avi.write(img) vid_avi.write(imread(os.path.join(tiffs_path_dir, tiff_file))) cv2.destroyAllWindows() vid_avi.release() time_to_convert = time() - start_time print(f"time_to_convert: {time_to_convert} sec")
def ESRGAN(self, img_path, img_name, is_video=False): # Image reads if is_video == False: try: image = skimage.io.imread( img_path) # problems with strange shapes if image.ndim != 3: image = skimage.color.gray2rgb( image) # convert to rgb if greyscale if image.shape[-1] == 4: image = image[..., :3] # strip alpha channel except Exception as e: print( "ERROR in detector.ESRGAN: Image read. Skipping. image_path=", img_path) print(e) return # Run detection first r = self.model.detect([image], verbose=0)[0] # Remove bars from detection; class 1 if len(r["scores"]) == 0: print("Skipping image with no detection") return remove_indices = np.where(r['class_ids'] != 2) new_masks = np.delete(r['masks'], remove_indices, axis=2) # load image from esrgan gan_img_path = self.out_path + img_name[:-4] + '.png' gan_image = skimage.io.imread(gan_img_path) gan_image = resize(gan_image, (image.shape[1], image.shape[0])) # Splice newly enhanced mosaic area over original image fin_img = self.splice(image, new_masks, gan_image) try: # Save output, now force save as png file_name = self.fin_path + img_name[:-4] + '.png' skimage.io.imsave(file_name, fin_img) except Exception as e: print("ERROR in ESRGAN: Image write. Skipping. image_path=", img_path, e) else: # Video capture try: video_path = img_path vcapture = VideoCapture(video_path) width = int(vcapture.get(CAP_PROP_FRAME_WIDTH)) height = int(vcapture.get(CAP_PROP_FRAME_HEIGHT)) fps = vcapture.get(CAP_PROP_FPS) print("Detected fps:", fps) # Define codec and create video writer, video output is purely for debugging and educational purpose. Not used in decensoring. file_name = img_name[:-4] + "_decensored.mp4" vwriter = VideoWriter(file_name, VideoWriter_fourcc(*'mp4v'), fps, (width, height)) except Exception as e: print("ERROR in ESRGAN: video read and init.", e) return count = 0 success = True print( "Video read complete. Starting video phase 2: detection + splice" ) while success: print("frame: ", count) # Read next image success, image = vcapture.read() if success: # OpenCV returns images as BGR, convert to RGB image = image[..., ::-1] # Detect objects r = self.model.detect([image], verbose=0)[0] if len(r["scores"]) == 0: print("Skipping frame with no detection") # Still need to write image to vwriter image = image[..., ::-1] vwriter.write(image) count += 1 continue # Remove unwanted class, code from https://github.com/matterport/Mask_RCNN/issues/1666 remove_indices = np.where( r['class_ids'] != 2) # remove bars: class 1 new_masks = np.delete(r['masks'], remove_indices, axis=2) gan_img_path = self.out_path + img_name[:-4] + str( count).zfill(6) + '.png' gan_image = skimage.io.imread(gan_img_path) gan_image = resize(gan_image, (image.shape[1], image.shape[0])) fin_img = self.splice(image, new_masks, gan_image) fin_img = fin_img[ ..., ::-1] # reverse RGB to BGR for video writing # Add image to video writer vwriter.write(fin_img) fin_img = 0 # not sure if this does anything haha count += 1 vwriter.release() print('Video: Phase 2 complete!')
def simple_agent_1(env):
    if not env.started:
        return 1
    ball_pos = env.ball.center
    paddle_pos = env.paddle_1.center
    if paddle_pos[1] < ball_pos[1]:
        return 2
    else:
        return 3


vid = VideoWriter('demo_pong.avi', VideoWriter_fourcc(*"XVID"), float(30), (160, 210), False)
env = Pong({
    'max_step': 1000,
    'lifes': 7,
    'ball_speed': [5, -2],
    # 'ball_size': [5, 5],
    # 'ball_color': 200,
    # 'paddle_width': 50,
    'paddle_speed': 5
})
for ep in range(1):
    obs = env.reset()
    for t in itertools.count():
import cv2
from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize
import os

img_root = "/home/chengyuan/PycharmProjects/Everybody_Dance_Now_Sun//datasets/test_B/"
fps = 50
fourcc = VideoWriter_fourcc(*"MJPG")
videoWriter = cv2.VideoWriter("The_Way_You_Want_to_Save_Your_Vid.avi", fourcc, fps, (512, 288))

im_names = os.listdir(img_root)
for im_name in range(len(im_names)):
    frame = cv2.imread(img_root + '{:05d}.png'.format(im_name))
    print(im_name, frame.size)
    videoWriter.write(frame)
videoWriter.release()
def make_video(images, images_paths, output_path, fps=1.0, size=None, is_color=True, format='MJPG'):
    """
    Create a video from a list of images.

    @param images        list of images (RGB arrays) to use in the video
    @param images_paths  list of image file paths (used to extract the date stamp)
    @param output_path   output video path, without extension
    @param fps           frames per second
    @param size          size of each frame
    @param is_color      color
    @param format        see http://www.fourcc.org/codecs.php
    @return              see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html

    The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/.
    By default, the video will have the size of the first image.
    It will resize every image to this size before adding them to the video.
    """
    video_name = output_path + '.avi'
    fourcc = VideoWriter_fourcc(*'MJPG')
    vid = None
    for i in range(len(images_paths)):
        image_path = images_paths[i]
        img = images[i]
        if vid is None:
            if size is None:
                size = img.shape[1], img.shape[0]
            vid = VideoWriter(video_name, fourcc, float(fps), size, is_color)
        if size[0] != img.shape[1] or size[1] != img.shape[0]:
            img = resize(img, size)
        widthScale = img.shape[0] / 500   # (font is scaled for Upernavik, which is 500 pixels wide)
        heightScale = img.shape[1] / 500  # (font is scaled for Upernavik, which is 500 pixels wide)
        bottomLeftCornerOfText = (4, img.shape[1] - int(34 * heightScale))
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 1 * widthScale
        fontColor = (0, 192, 216)
        fontColorBorder = (0, 0, 0)
        lineType = 2
        thickness = 2
        thicknessBorder = 8
        date, year, month, day = get_date(image_path)
        #text_width, text_height = cv2.getTextSize(date, font, fontScale, lineType)[0]
        cv2.putText(img, date, bottomLeftCornerOfText, font, fontScale, fontColorBorder,
                    thickness=thicknessBorder, lineType=lineType)
        cv2.putText(img, date, bottomLeftCornerOfText, font, fontScale, fontColor,
                    thickness=thickness, lineType=lineType)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        vid.write(img)
    vid.release()
    return
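# --- Hedged usage sketch (not in the original source) ---
# make_video takes pre-loaded RGB frames plus their file paths; the paths are only
# consumed by get_date() to stamp a date onto each frame, so the glob pattern and
# file naming below are assumptions that would need to match whatever get_date()
# expects. The output name is illustrative.
if __name__ == '__main__':
    import glob
    images_paths = sorted(glob.glob('frames/*.jpg'))
    images = [cv2.cvtColor(cv2.imread(p), cv2.COLOR_BGR2RGB) for p in images_paths]
    if images:
        make_video(images, images_paths, output_path='upernavik_timelapse', fps=2.0)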
vid_1 = '/media/zaigham/SSD_1TB/gits/context_encoder_pytorch/Video_test/Codes and Trained Network/patches/video/tennis.avi'

# histogram_eq = 0
# adaptive_hist_eq = 1
# videos = ['20180713_101224.mp4', '']
# file_list = os.listdir(source_dir)

count = 0
dilation_track_var = 0
vidcap_1 = cv2.VideoCapture(vid_1)
success, image_1 = vidcap_1.read()
cv2.putText(image_1, "No_hist", (1, 1), cv2.FONT_HERSHEY_SIMPLEX, 10, (255, 255, 255))

format = "MP4V"
fourcc = VideoWriter_fourcc(*format)
# fourcc = cv2.VideoWriter_fourcc('mp4')  # Be sure to use lower case
# out = cv2.VideoWriter('stitched.mp4', fourcc, 30, (1920*2, 1080))
out_path = '/media/zaigham/SSD_1TB/gits/context_encoder_pytorch/Video_test/Codes and Trained Network/patches/final/'
size = image_1.shape[1], image_1.shape[0]
vid_writer = VideoWriter(
    '/media/zaigham/SSD_1TB/gits/context_encoder_pytorch/Video_test/Codes and Trained Network/patches/video/tennis2.mp4',
    fourcc, float(30), size, True)

while success:
    #for ch in range(3):
    #image2[:,:,ch] = cv2.equalizeHist(image[:, :, ch])
    # image = np.concatenate((image_1, image_2), axis=0)
def change_video_resolution(file_name, new_width=1024, new_height=None, using_croping=True): """ Change a video resolution from 1920*1200 to 1792*1024 Or add black pixels and extend to : 2048*1280 Returns: """ # TO READ cap = VideoCapture(file_name) # Check if camera opened successfully if cap.isOpened() == False: print("Error opening video stream or file") return length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps_avi = cap.get(cv2.CAP_PROP_FPS) # if new_width or new_height is None, then choose the one at None so we keep the ratio if new_width is None and new_height is None: raise Exception("Width and height are None") if new_width is None: new_width = (width * new_height) / height if new_height is None: new_height = (height * new_width) / width new_avi_file_name = f"res_dpk_{new_width}_{new_height}_" + file_name new_avi_file_name = os.path.join(fpath, new_avi_file_name) # for writing is_color = True # put fourcc to 0 for no compression # fourcc = 0 fourcc = VideoWriter_fourcc(*"XVID") size_avi = (new_width, new_height) vid_avi = VideoWriter(new_avi_file_name, fourcc, fps_avi, size_avi, is_color) n_frames = 0 # Read until video is completed while cap.isOpened(): # Capture frame-by-frame ret, frame = cap.read() if n_frames == 0: print(f"frame.shape {frame.shape}") if ret == True: frame = change_frame_resolution(frame=frame, new_width=new_width, new_height=new_height, using_croping=using_croping) if n_frames == 0: print(f"new frame.shape {frame.shape}") n_frames += 1 vid_avi.write(frame) # Break the loop else: break if n_frames % 5000 == 0: print(f"{n_frames} converted over {length}") print(f"{n_frames} frames converted") # Closes all the frames cv2.destroyAllWindows() # When everything done, release the video capture object vid_avi.release() cap.release() print(f"new_avi_file_name {new_avi_file_name}") return new_avi_file_name
You should have received a copy of the GNU General Public License
along with RS Companion. If not, see <https://www.gnu.org/licenses/>.

Author: Phillip Riskin
Date: 2020
Project: Companion App
Company: Red Scientific
https://redscientific.com/index.html
"""

from cv2 import CAP_DSHOW, VideoWriter_fourcc
from enum import Enum, auto

cap_backend = CAP_DSHOW
cap_temp_codec = VideoWriter_fourcc(*'mjpg')
cap_codec = VideoWriter_fourcc(*'MJPG')

common_resolutions = [
    (640.0, 480.0),      # 4:3
    # (640.0, 640.0),    # 1:1
    # (800.0, 600.0),    # 4:3
    # (960.0, 720.0),    # 4:3
    (1024.0, 768.0),     # 4:3
    # (1248.0, 1536.0),  # Non-standard
    (1280.0, 720.0),     # 16:9
    # (1280.0, 960.0),   # 4:3
    # (1440.0, 1080.0),  # 4:3
    # (1600.0, 900.0),   # 16:9
    # (1600.0, 1200.0),  # 4:3
    (1920.0, 1080.0),    # 16:9
def main(title: str, skip_crawling: bool): title = str(title) if (not skip_crawling): crawler.main(title) print("Start to create video for {}".format(title)) fps = config['fps'] width = config['width'] height = config['height'] # Paths output_dir = os.sep.join([".", "output"]) if not os.path.exists(output_dir): print("Folder", output_dir, 'does not exist. Creating...') os.makedirs(output_dir) resource_dir = os.sep.join([".", "resource", title]) # Assets result = text_processing.load_data(title) title_font = ImageFont.truetype(config['title_font'], config['title_font_size'], encoding="utf-8") content_font = ImageFont.truetype(config['content_font'], config['content_font_size'], encoding="utf-8") title_wrapper = text_processing.Wrapper(title_font) content_wrapper = text_processing.Wrapper(content_font) audio_clip = AudioFileClip( os.sep.join([resource_dir, "audio", title + ".mp3"])) # Video Properties fourcc = VideoWriter_fourcc(*'mp4v') video = VideoWriter(os.sep.join([output_dir, title + '_complex_temp.mp4']), fourcc, float(fps), (width, height)) # Create Video keys = list(map(int, result.keys())) if 0 not in keys: keys.append(0) frame = image_processing.generate_cv2_blank_frame( "", "", (width, height), title_wrapper, content_wrapper, title_font, content_font) else: key = "0" image = os.sep.join( [resource_dir, str(key) + result[key]['image_suffix']]) header = result[key]['header'] content = result[key]['content'] print("标题:{}".format(header)) if (result[key]['image_suffix'] in ['.gif', '.GIF']): frame = image_processing.generate_cv2_blank_frame( header, content, (width, height), title_wrapper, content_wrapper, title_font, content_font) else: frame = image_processing.generate_cv2_frame( image, header, content, (width, height), title_wrapper, content_wrapper, title_font, content_font) #os.remove(image) keys.sort() # Set last picture to be 20 seconds long keys.append(math.ceil(audio_clip.duration)) #print(keys) # Number of frames in this video total_length = (200 if config['test'] else keys[len(keys) - 1]) * fps index = 0 for i in range(total_length): if (index > len(keys) - 1): frame = image_processing.generate_cv2_blank_frame( "", "", (width, height), title_wrapper, content_wrapper, title_font, content_font) elif (i / fps) >= keys[index + 1]: index += 1 print("Processing {} frames out of {}".format( index, len(keys) - 1)) key = str(keys[index]) image = os.sep.join( [resource_dir, str(key) + result[key]['image_suffix']]) header = result[key]['header'] content = result[key]['content'] print("标题:{}".format(header)) if (result[key]['image_suffix'] in ['.gif', '.GIF']): frame = image_processing.generate_cv2_blank_frame( header, content, (width, height), title_wrapper, content_wrapper, title_font, content_font) else: frame = image_processing.generate_cv2_frame( image, header, content, (width, height), title_wrapper, content_wrapper, title_font, content_font) #os.remove(image) else: pass video.write(frame) video.release() video_clip = VideoFileClip( os.sep.join([output_dir, title + "_complex_temp.mp4"])) print(video_clip.duration) video_clip.audio = audio_clip if config['enable_logo']: logo_clip = video_processing.load_logo(os.sep.join( [".", "util", config['logo_name']]), duration=video_clip.duration) video_clip = video_processing.add_logo(video_clip, logo_clip) if config['test']: video_clip = video_clip.subclip(0, min(200, video_clip.duration)) video_clip.write_videofile(os.sep.join( [output_dir, title + "_complex.mp4"]), fps=fps) print("{} finished!".format(title)) 
os.remove(os.sep.join([output_dir, title + "_complex_temp.mp4"]))