Example #1
def main(args):
    video_dev_id = args.video_id
    if len(video_dev_id) < 4:
        # Assume that URLs/file paths have at least 4 characters;
        # shorter strings are treated as numeric device indices
        video_dev_id = int(video_dev_id)

    print("Video id:", video_dev_id)
    print("Res:", args.res)

    mode = args.res
    cam_res = int(mode)
    width = cam_res  # square output
    height = cam_res
    shape = (height, width)
    # cam_res = 256  # <- can be done, but spynnaker doesn't support such a resolution

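    # Bit-field parameters, presumably used to pack each spike's (row, col,
    # polarity) into a single integer key, pyDVS-style; this assumes cam_res is
    # a power of two (log2(cam_res) bits per coordinate, cam_res - 1 as mask).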
    data_shift = uint8(log2(cam_res))
    up_down_shift = uint8(2 * data_shift)
    data_mask = uint8(cam_res - 1)

    polarity = POLARITY_DICT[MERGED_POLARITY]
    output_type = OUTPUT_TIME
    history_weight = 1.0
    threshold = 12  # ~ 0.05*255
    max_threshold = 180  # 12*15 ~ 0.7*255

    scale_width = 0
    scale_height = 0
    col_from = 0
    col_to = 0

    curr = np.zeros(shape, dtype=int16)
    ref = 128 * np.ones(shape, dtype=int16)
    spikes = np.zeros(shape, dtype=int16)
    diff = np.zeros(shape, dtype=int16)
    abs_diff = np.zeros(shape, dtype=int16)

    # just to see things in a window
    spk_img = np.zeros((height, width, 3), uint8)

    num_bits = 6  # how many bits are used to represent exceeded thresholds
    num_active_bits = 2  # how many of those bits are active
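    # log2_table is (presumably) the lookup table for the time-binned threshold
    # coding: it maps how far a pixel exceeded the threshold onto a pattern of
    # at most num_active_bits active bits out of the num_bits time bins.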
    log2_table = gs.generate_log2_table(num_active_bits,
                                        num_bits)[num_active_bits - 1]
    spike_lists = None
    pos_spks = None
    neg_spks = None
    max_diff = 0

    # -------------------------------------------------------------------- #
    # inhibition related                                                   #

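    # inh_coords precomputes the pixel coordinates of each inh_width x inh_width
    # patch; local inhibition (below) keeps only the strongest response per patch.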
    inh_width = 2
    is_inh_on = True
    inh_coords = gs.generate_inh_coords(width, height, inh_width)

    # -------------------------------------------------------------------- #
    # camera/frequency related                                             #

    video_dev = cv2.VideoCapture(video_dev_id)  # webcam index or stream URL
    # video_dev = cv2.VideoCapture('/path/to/video/file')  # or a video file

    print("Video device opened:", video_dev.isOpened())

    #ps3 eyetoy can do 125fps
    try:
        video_dev.set(cv2.CAP_PROP_FPS, 125)
    except Exception:
        pass

    fps = video_dev.get(cv2.CAP_PROP_FPS)
    if fps == 0.0:
        fps = 125.0
    max_time_ms = int(1000. / float(fps))

    #---------------------- main loop -------------------------------------#

    WINDOW_NAME = 'spikes'
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.startWindowThread()

    is_first_pass = True
    start_time = time.time()
    end_time = 0
    frame_count = 0
    while True:
        # get an image from video source
        if is_first_pass:
            curr[:], scale_width, scale_height, col_from, col_to = grab_first(
                video_dev, cam_res)
            is_first_pass = False
        else:
            curr[:] = grab_frame(video_dev, scale_width, scale_height,
                                 col_from, col_to)

        # do the difference
        diff[:], abs_diff[:], spikes[:] = gs.thresholded_difference(
            curr, ref, threshold)

        # inhibition ( optional )
        if is_inh_on:
            spikes[:] = gs.local_inhibition(spikes, abs_diff, inh_coords,
                                            width, height, inh_width)

        # update the reference
        ref[:] = gs.update_reference_time_binary_thresh(
            abs_diff, spikes, ref, threshold, max_time_ms, num_active_bits,
            history_weight, log2_table)

        # convert into a set of packages to send out
        neg_spks, pos_spks, max_diff = gs.split_spikes(spikes, abs_diff,
                                                       polarity)

        # this takes too long, could be parallelized at expense of memory
        spike_lists = gs.make_spike_lists_time_bin_thr(
            pos_spks, neg_spks, max_diff, up_down_shift, data_shift, data_mask,
            max_time_ms, threshold, max_threshold, num_bits, log2_table)

        spk_img[:] = gs.render_frame(spikes, curr, cam_res, cam_res, polarity)
        cv2.imshow(WINDOW_NAME, spk_img.astype(uint8))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        end_time = time.time()

        if end_time - start_time >= 1.0:
            print("%d frames per second" % (frame_count))
            frame_count = 0
            start_time = time.time()
        else:
            frame_count += 1

    cv2.destroyAllWindows()
    cv2.waitKey(1)
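
Example #1 pulls its settings from an argparse namespace (args.video_id, args.res) whose parser is not part of the snippet. A minimal sketch of what that parser might look like, with hypothetical option names and defaults, follows:

import argparse

def parse_args():
    # Hypothetical parser matching the attributes the snippet reads
    # (args.video_id and args.res); adjust names/defaults to the real script.
    parser = argparse.ArgumentParser(
        description='pyDVS-style webcam-to-spikes converter')
    parser.add_argument('--video_id', default='0',
                        help='webcam index, or a path/URL to a video source')
    parser.add_argument('--res', default='128',
                        help='square output resolution (a power of two)')
    return parser.parse_args()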
Example #2
    def read_video_source(self):
        data_shift = uint8(log2(self.cam_res))
        up_down_shift = uint8(2 * data_shift)
        data_mask = uint8(self.cam_res - 1)

        output_spikes = []

        # Default values from pyDVS
        history_weight = 1.0
        threshold = 12  # ~ 0.05*255
        max_threshold = 180  # 12*15 ~ 0.7*255

        curr = np.zeros(self.shape, dtype=int16)
        ref = 128 * np.ones(self.shape, dtype=int16)
        spikes = np.zeros(self.shape, dtype=int16)
        diff = np.zeros(self.shape, dtype=int16)
        abs_diff = np.zeros(self.shape, dtype=int16)

        # just to see things in a window
        spk_img = np.zeros((self.cam_res, self.cam_res, 3), uint8)

        num_bits = 6  # how many bits are used to represent exceeded thresholds
        num_active_bits = 2  # how many of those bits are active
        log2_table = gs.generate_log2_table(num_active_bits,
                                            num_bits)[num_active_bits - 1]
        spike_lists = None
        pos_spks = None
        neg_spks = None
        max_diff = 0

        # -------------------------------------------------------------------- #
        # inhibition related                                                   #

        inh_width = 2
        is_inh_on = self.inhibition
        inh_coords = gs.generate_inh_coords(self.cam_res, self.cam_res,
                                            inh_width)

        if self.video_device != 'webcam':
            video_dev = cv2.VideoCapture(self.video_device)
            self.channel = 'VIDEO'
            print('File opened correctly:', video_dev.isOpened())
        else:
            video_dev = cv2.VideoCapture(0)  # webcam
            print('Webcam working:', video_dev.isOpened())

        fps = video_dev.get(cv2.CAP_PROP_FPS)
        frame_time_ms = int(1000. / float(fps))
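        # split the frame interval into num_bits time bins; these bins are used
        # below when assigning timestamps to the emitted spikes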
        self.time_bin_ms = frame_time_ms // num_bits

        if self.output_video:
            fourcc = cv2.VideoWriter_fourcc(*'MP42')
            self.video_writer_path = self.output_video + '_video.avi'
            video_writer = cv2.VideoWriter(self.video_writer_path, fourcc, fps,
                                           self.shape)

        WINDOW_NAME = 'DVS Emulator'
        cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
        cv2.startWindowThread()

        # if self.output_type == OUTPUT_TIME or self.output_type == OUTPUT_RATE:
        #     num_bits = np.floor(frame_time_ms)
        # else:
        #     num_bits = 5.

        output_spikes = []
        output_spikes_tuple = []

        is_first_pass = True
        start_time = time.time()
        end_time = 0
        frame_count = 0

        while True:
            # get an image from video source
            if is_first_pass:
                curr[:], scale_width, scale_height, col_from, col_to, frame \
                    = self.grab_first(video_dev, self.cam_res, self.channel)
                is_first_pass = False
            else:
                read_correctly, raw = video_dev.read()
                if not read_correctly:
                    break
                curr[:], frame = self.grab_frame(raw, scale_width,
                                                 scale_height, col_from,
                                                 col_to, self.channel)

            # do the difference
            diff[:], abs_diff[:], spikes[:] = gs.thresholded_difference(
                curr, ref, threshold)

            # inhibition ( optional )
            if is_inh_on:
                spikes[:] = gs.local_inhibition(spikes, abs_diff, inh_coords,
                                                self.cam_res, self.cam_res,
                                                inh_width)

            # update the reference
            ref[:] = self.update_ref(self.output_type, abs_diff, spikes, ref,
                                     threshold, frame_time_ms, num_bits,
                                     history_weight, log2_table)

            # convert into a set of packages to send out
            neg_spks, pos_spks, max_diff = gs.split_spikes(
                spikes, abs_diff, self.polarity)

            spike_lists = self.make_spikes_lists(self.output_type,
                                                 pos_spks,
                                                 neg_spks,
                                                 max_diff,
                                                 up_down_shift,
                                                 data_shift,
                                                 data_mask,
                                                 frame_time_ms,
                                                 threshold,
                                                 max_threshold,
                                                 num_bits,
                                                 log2_table,
                                                 key_coding=self.key_coding)

            spk_img[:] = gs.render_frame(spikes, curr, self.cam_res,
                                         self.cam_res, self.polarity)
            cv2.imshow(WINDOW_NAME, spk_img.astype(uint8))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            end_time = time.time()

            # Compute frames per second
            if end_time - start_time >= 1.0:
                print('{} frames per second'.format(frame_count))
                frame_count = 0
                start_time = time.time()
            else:
                frame_count += 1

            # Write spikes out in correct format
            time_index = 0
            for spk_list in spike_lists:
                for spk in spk_list:
                    output_spikes.append('{},{:f}'.format(
                        spk, self.sim_time + time_index))
                    output_spikes_tuple.append(
                        (spk, self.sim_time + time_index))
                time_index += self.time_bin_ms

            self.sim_time += frame_time_ms

            # write the frame
            if self.output_video:
                video_writer.write(
                    cv2.resize(spk_img.astype(uint8),
                               (int(self.cam_res), int(self.cam_res))))

        if self.output_video:
            video_writer.release()

        video_dev.release()

        cv2.waitKey(1)
        cv2.destroyAllWindows()

        self.output_spikes = output_spikes[:]
        self.output_spikes_tuple = output_spikes_tuple[:]
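
read_video_source() leaves its results on the instance (self.output_spikes, self.output_spikes_tuple) rather than writing them out. A small sketch of persisting them in the same text layout that the standalone script in Example #6 writes (resolution, total time, then one "key,time" line per spike); `emu` stands for an instance of the surrounding class and is illustrative only:

def save_spikes(emu, path):
    # Dump the spikes collected by read_video_source() to a text file
    with open(path, 'w') as fh:
        fh.write('{}\n'.format(emu.cam_res))    # first line: square resolution
        fh.write('{}\n'.format(emu.sim_time))   # second line: total time in ms
        fh.write('\n'.join(emu.output_spikes))  # one 'spike_key,time_ms' per line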
Example #3
num_bits = 6  # how many bits are used to represent exceeded thresholds
num_active_bits = 2  # how many of those bits are active
log2_table = gs.generate_log2_table(num_active_bits,
                                    num_bits)[num_active_bits - 1]
spike_lists = None
pos_spks = None
neg_spks = None
max_diff = 0

# -------------------------------------------------------------------- #
# inhibition related                                                   #

inh_width = 2
is_inh_on = True
inh_coords = gs.generate_inh_coords(width, height, inh_width)

# -------------------------------------------------------------------- #
# camera/frequency related                                             #

video_dev = cv2.VideoCapture(video_dev_id)  # webcam index or stream URL
# video_dev = cv2.VideoCapture('/path/to/video/file')  # or a video file

print("Video device opened:", video_dev.isOpened())

#ps3 eyetoy can do 125fps
try:
    video_dev.set(cv2.CAP_PROP_FPS, 125)
except Exception:
    pass
Example #4
log_time_code = (output_type == OUTPUT_TIME_BIN_THR) or \
                (output_type == OUTPUT_TIME_BIN)
print("using log spike time coding? %s" % (log_time_code))
history_weight = 0.99
behaviour = VirtualCam.BEHAVE_MICROSACCADE
max_dist = 1
data_shift = uint8(np.log2(cam_w))
flag_shift = uint8(2 * data_shift)
data_mask = uint8(cam_w - 1)

num_spikes = 1
log2_table = gs.generate_log2_table(num_spikes, int(num_bins))[0]

inh_width = 2
is_inh_on = False
inh_coords = gs.generate_inh_coords(cam_w, cam_w, inh_width)

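# spike threshold: roughly 5% of the full 8-bit pixel range,
# i.e. int(255 * 0.05) = 12, matching the hard-coded threshold = 12 above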
thresh = int((2**8 - 1) * 0.05)

dir_name = "./mnist_spikes/mnist_behave_%s_pol_%s_enc_%s_thresh_%d_hist_%d_inh_%s___%d_frames_at_%dfps_%dx%d_res_spikes"%\
            (behaviour, polarity_name, output_type, thresh, int(history_weight*100), \
             is_inh_on, frames_per_image, cam_fps, cam_w, cam_w)

print(dir_name)

if not os.path.exists(dir_name):
    os.makedirs(dir_name)

dir_name = "%s/%s" % (dir_name, setname)

if not os.path.exists(dir_name):
Example #5
log_time_code = (output_type == OUTPUT_TIME_BIN_THR) or \
                (output_type == OUTPUT_TIME_BIN)
print("using log spike time coding? %s"%(log_time_code))
history_weight = 0.99
behaviour = VirtualCam.BEHAVE_MICROSACCADE
max_dist = 1
data_shift = uint8(np.log2(cam_w))
flag_shift = uint8(2*data_shift)
data_mask  = uint8(cam_w - 1)

num_spikes = 1
log2_table = gs.generate_log2_table(num_spikes, int(num_bins))[0]

inh_width = 2
is_inh_on = False
inh_coords = gs.generate_inh_coords(cam_w, cam_w, inh_width)

thresh = int( (2**8 - 1)*0.05 )

dir_name = "./mnist_spikes/mnist_behave_%s_pol_%s_enc_%s_thresh_%d_hist_%d_inh_%s___%d_frames_at_%dfps_%dx%d_res_spikes"%\
            (behaviour, polarity_name, output_type, thresh, int(history_weight*100), \
             is_inh_on, frames_per_image, cam_fps, cam_w, cam_w)

print(dir_name)

if not os.path.exists(dir_name):
    os.makedirs(dir_name)

dir_name = "%s/%s"%(dir_name, setname)

if not os.path.exists(dir_name):
Example #6
def main(args):
    video_dev_id = args.video_id

    if len(video_dev_id) < 4:
        # Assume that URLs/file paths have at least 4 characters;
        # shorter strings are treated as numeric device indices
        video_dev_id = int(video_dev_id)

    cam_res = int(args.res)
    width = cam_res  # square output
    height = cam_res
    shape = (height, width)
    channel = args.channel

    data_shift = uint8(log2(cam_res))
    up_down_shift = uint8(2 * data_shift)
    data_mask = uint8(cam_res - 1)

    polarity = POLARITY_DICT[MERGED_POLARITY]
    output_type = OUTPUT_TIME_BIN_THR
    history_weight = 1.0
    threshold = 12  # ~ 0.05*255
    max_threshold = 180  # 12*15 ~ 0.7*255

    scale_width = 0
    scale_height = 0
    col_from = 0
    col_to = 0

    print()
    print('Channel:', channel)
    print('Polarity:', polarity)
    print('Output Type:', output_type)
    print('Resolution:', cam_res)
    print('Video id:', video_dev_id)
    print()

    curr = np.zeros(shape, dtype=int16)
    ref = 128 * np.ones(shape, dtype=int16)
    spikes = np.zeros(shape, dtype=int16)
    diff = np.zeros(shape, dtype=int16)
    abs_diff = np.zeros(shape, dtype=int16)

    # just to see things in a window
    spk_img = np.zeros((height, width, 3), uint8)

    num_bits = 6  # how many bits are used to represent exceeded thresholds
    num_active_bits = 2  # how many of those bits are active
    log2_table = gs.generate_log2_table(num_active_bits,
                                        num_bits)[num_active_bits - 1]
    spike_lists = None
    pos_spks = None
    neg_spks = None
    max_diff = 0

    # -------------------------------------------------------------------- #
    # inhibition related                                                   #

    inh_width = 2
    is_inh_on = True
    inh_coords = gs.generate_inh_coords(width, height, inh_width)

    # -------------------------------------------------------------------- #
    # camera/frequency related                                             #
    if args.input_video != 'webcam':
        video_dev = cv2.VideoCapture(args.input_video)
        channel = 'VIDEO'
        print('File opened correctly:', video_dev.isOpened())
    else:
        video_dev = cv2.VideoCapture(video_dev_id)  # webcam
        print('Webcam working:', video_dev.isOpened())

    if not video_dev.isOpened():
        print('Exiting because webcam/file is not working')
        exit()

    # ps3 eyetoy can do 125fps
    try:
        video_dev.set(cv2.CAP_PROP_FPS, 125)
    except Exception:
        pass

    fps = video_dev.get(cv2.CAP_PROP_FPS)
    if fps == 0.0:
        fps = 125.0
    frame_time_ms = int(1000. / float(fps))
    time_bin_ms = frame_time_ms // num_bits

    if args.output_file and args.save_video:
        fourcc = cv2.VideoWriter_fourcc(*'MP42')
        video_writer = cv2.VideoWriter(args.output_file[:-4] + "_video.avi",
                                       fourcc, fps, (cam_res, cam_res))

    # ---------------------- main loop -------------------------------------#

    WINDOW_NAME = 'spikes'
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.startWindowThread()

    output_spikes = []

    # if output_type == OUTPUT_TIME or output_type == OUTPUT_RATE:
    #     num_bits = np.floor(frame_time_ms)
    # else:
    #     num_bits = 5.

    is_first_pass = True
    start_time = time.time()
    end_time = 0
    frame_count = 0
    total_time = 0

    while True:
        # get an image from video source
        if is_first_pass:
            curr[:], scale_width, scale_height, col_from, col_to, frame = grab_first(
                video_dev, cam_res, channel)
            is_first_pass = False
        else:
            read_correctly, raw = video_dev.read()
            if not read_correctly:
                break
            curr[:], frame = grab_frame(raw, scale_width, scale_height,
                                        col_from, col_to, channel)

        # do the difference
        diff[:], abs_diff[:], spikes[:] = gs.thresholded_difference(
            curr, ref, threshold)

        # inhibition ( optional )
        if is_inh_on:
            spikes[:] = gs.local_inhibition(spikes, abs_diff, inh_coords,
                                            width, height, inh_width)

        # update the reference
        ref[:] = update_ref(output_type, abs_diff, spikes, ref, threshold,
                            frame_time_ms, num_bits, history_weight,
                            log2_table)

        # convert into a set of packages to send out
        neg_spks, pos_spks, max_diff = gs.split_spikes(spikes, abs_diff,
                                                       polarity)

        spike_lists = make_spikes_lists(output_type, pos_spks, neg_spks,
                                        max_diff, up_down_shift, data_shift,
                                        data_mask, frame_time_ms, threshold,
                                        max_threshold, num_bits, log2_table)

        spk_img[:] = gs.render_frame(spikes, curr, cam_res, cam_res, polarity)
        cv2.imshow(WINDOW_NAME, spk_img.astype(uint8))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        end_time = time.time()

        # Compute frames per second
        if end_time - start_time >= 1.0:
            print('{} frames per second'.format(frame_count))
            frame_count = 0
            start_time = time.time()
        else:
            frame_count += 1

        # Write spikes out in correct format
        time_index = 0
        for spk_list in spike_lists:
            for spk in spk_list:
                output_spikes.append('{},{:f}'.format(spk,
                                                      total_time + time_index))
            time_index += time_bin_ms

        total_time += frame_time_ms

        # write the frame
        if args.output_file and args.save_video:
            video_writer.write(cv2.resize(frame, (int(cam_res), int(cam_res))))

    if args.output_file:
        # First line is dimension of video
        with open(args.output_file, 'w') as fh:
            fh.write('{}\n'.format(cam_res))
            fh.write('{}\n'.format(total_time))
            fh.write('\n'.join(output_spikes))

        if args.save_video:
            video_writer.release()

    cv2.destroyAllWindows()
    cv2.waitKey(1)
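
The spike file written above has a simple line-oriented layout: line one holds the square resolution, line two the total recorded time in milliseconds, and every following line one "spike_key,time_ms" pair. A minimal reader sketch (function and variable names are illustrative, not part of the original script):

def load_spike_file(path):
    # Parse the text format produced by the script above
    with open(path) as fh:
        lines = fh.read().splitlines()
    cam_res = int(lines[0])          # line 1: square resolution
    total_time_ms = int(lines[1])    # line 2: total recorded time (ms)
    spikes = []
    for line in lines[2:]:
        if not line:
            continue
        key, t = line.split(',')
        spikes.append((int(key), float(t)))
    return cam_res, total_time_ms, spikes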
Example #7
max_threshold = 180 # 12*15 ~ 0.7*255

scale_width = 0
scale_height = 0
col_from = 0
col_to = 0

curr = np.zeros(shape, dtype=int16)
ref = 128 * np.ones(shape, dtype=int16)

# -------------------------------------------------------------------- #
# inhibition related                                                   #

inh_width = 2
is_inh_on = False
inh_coords = gs.generate_inh_coords(width, height, inh_width)



def main():

  # -------------------------------------------------------------------- #
  # camera/frequency related                                             #

  video_dev = cv2.VideoCapture(video_dev_id)  # webcam index or stream URL
  #~ video_dev = cv2.VideoCapture('./120fps HFR Sample.mp4')  # or a video file
  
  #ps3 eyetoy can do 125fps
  try:
    video_dev.set(cv2.CAP_PROP_FRAME_WIDTH, 320)