if not EVL.get_events():
    print("No events were found. Nothing to amplify - exiting.")
    sys.exit()

EVENT_LIST = _rounded_evl_list(EVL, DURATION)
EVENT_LIST = sorted(EVENT_LIST, key=lambda event: event[0])

# Check for overlaps in events
# TODO Treat overlapping events as a single event?
for i in range(1, len(EVENT_LIST)):
    if EVENT_LIST[i][0] < EVENT_LIST[i - 1][1]:
        raise OverLappingEventsError("Events {0} and {1} overlap.\n".format(i - 1, i) +
                                     "Amplification would be ambiguous")

ORIGINAL = pyvideomeg.VideoData(VIDEO_FILE)
AMPLIFIED = pyvideomeg.VideoFile(op.join(TREE, "{0}.video.amp.dat".format(FNAME)),
                                 ORIGINAL.ver, site_id=ORIGINAL.site_id,
                                 is_sender=ORIGINAL.is_sender)
# average frame rate (timestamps are in milliseconds)
FPS = (len(ORIGINAL.ts) - 1) * 1000. / (ORIGINAL.ts[-1] - ORIGINAL.ts[0])

# Start the Matlab engine for the amplification.
# Won't work if an internet connection is not available - Matlab cannot check the license.
try:
    _ENG = matlab.engine.start_matlab()
# Matlab doesn't specify this exception
except Exception:
import numpy as np
import PIL.Image
import cStringIO
import struct

import pyvideomeg

FNAME_1 = '/home/andrey/Desktop/test/3/videoMEG_sync_test.video_01.dat'
FNAME_2 = '/home/andrey/Desktop/test/3/videoMEG_sync_test.video_02.dat'
OUT_FNAME = '/home/andrey/Desktop/test/3/videoMEG_sync_test.video_merged.dat'
FRAME_SZ = (640, 480)

file_1 = pyvideomeg.VideoData(FNAME_1)
file_2 = pyvideomeg.VideoData(FNAME_2)

out_file = open(OUT_FNAME, 'wb')
out_file.write('ELEKTA_VIDEO_FILE')
out_file.write(struct.pack('I', 1))     # file format version

for i in range(len(file_1.ts)):
    # find the closest matching frame from the second file
    file2_indx = np.argmin(np.abs(file_2.ts - file_1.ts[i]))

    # merge the images
    im0 = PIL.Image.open(cStringIO.StringIO(file_1.get_frame(i)))
    im1 = PIL.Image.open(cStringIO.StringIO(file_2.get_frame(file2_indx)))
    im0 = im0.resize((FRAME_SZ[0]//2, FRAME_SZ[1]), PIL.Image.ANTIALIAS)
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

import sys

import pyvideomeg

try:
    vf = pyvideomeg.VideoData(sys.argv[1])
    is_audio = False
except pyvideomeg.UnknownVersionError:
    print('The file %s has unknown version' % sys.argv[1])
    sys.exit(1)
except:
    try:
        af = pyvideomeg.AudioData(sys.argv[1])
        is_audio = True
    except pyvideomeg.UnknownVersionError:
        print('The file %s has unknown version' % sys.argv[1])
        sys.exit(1)
    except:
        print('The file %s seems to be neither video nor audio file' % sys.argv[1])
        sys.exit(1)
MENCODER_LOG_FILE = '/dev/null'
FONT_FILE = '/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf'
FONT_SZ = 20

#--------------------------------------------------------------------------
# Make a temporary folder for storing frames and audio files
#
tmp_fldr = tempfile.mkdtemp()
print('Using \'%s\' for storing temporary files' % tmp_fldr)

fnt = ImageFont.truetype(FONT_FILE, FONT_SZ)

#--------------------------------------------------------------------------
# Read the video file and extract frames to multiple jpg files
#
vid_file = pyvideomeg.VideoData(sys.argv[1])

for i in range(len(vid_file.ts)):
    img = Image.open(BytesIO(vid_file.get_frame(i)))
    draw = ImageDraw.Draw(img)
    draw.text((10, 0),
              '%i : %s' % (vid_file.ts[i], pyvideomeg.ts2str(vid_file.ts[i])),
              font=fnt, fill='black')
    img.save('%s/%08i.jpg' % (tmp_fldr, i))

#--------------------------------------------------------------------------
# If no audio file, encode and exit
#
if len(sys.argv) == 3:
# Load the data
#
raw = mne.io.Raw(MEG_FNAME, allow_maxshield=True)

# load the timing channel
picks_timing = mne.pick_types(raw.info, meg=False, include=[TIMING_CH])
dt_timing = raw[picks_timing, :][0].squeeze()

# load the MEG channel
picks_meg = mne.pick_types(raw.info, meg=False, include=[MEG_CH])
meg = raw[picks_meg, :][0].squeeze()

# compute the timestamps for the MEG channel
meg_ts = pyvideomeg.comp_tstamps(dt_timing, raw.info['sfreq'])

vid_file_1 = pyvideomeg.VideoData(VIDEO_FNAME_1)
vid_file_2 = pyvideomeg.VideoData(VIDEO_FNAME_2)

aud_file = pyvideomeg.AudioData(AUDIO_FNAME)
audio, audio_ts = aud_file.format_audio()
audio = audio[0, :].squeeze()   # use only the first audio channel

#--------------------------------------------------------------------------
# Make the pics
#
plt.ioff()  # don't pop up the figure windows

ts_scale = np.diff(vid_file_1.ts).max() * (WIND_WIDTH + 0.1)
meg_scale = np.percentile(np.abs(meg), SCALE_PRCTILE_MEG) * 1.1
aud_scale = np.percentile(np.abs(audio), SCALE_PRCTILE_AUDIO) * 1.1
MENCODER_LOG_FILE = '/dev/null'
FONT_FILE = '/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf'
FONT_SZ = 20

#--------------------------------------------------------------------------
# Make a temporary folder for storing frames
#
tmp_fldr = tempfile.mkdtemp()
print('Using \'%s\' for storing temporary files' % tmp_fldr)

#--------------------------------------------------------------------------
# Read video files and merge the frames
#
vid_file_1 = pyvideomeg.VideoData(sys.argv[1])
vid_file_2 = pyvideomeg.VideoData(sys.argv[2])

# make sure that the files overlap in time
assert((vid_file_1.ts[0] < vid_file_2.ts[-1]) & (vid_file_2.ts[0] < vid_file_1.ts[-1]))

# skip the unmatched frames (if any) at the beginning of video_file_1
i = 0
while min(abs(vid_file_1.ts[i+1] - vid_file_2.ts)) < min(abs(vid_file_1.ts[i] - vid_file_2.ts)):
    i += 1

fnt = ImageFont.truetype(FONT_FILE, FONT_SZ)

indx = []
err = []
first_i = i