def apply(self, text):
    tokens = [token.text for token in text]
    scores = self.model(tokens)
    events = []
    for i, w in enumerate(text):
        if isinstance(scores[i], float):
            values = [Value(text, self, {self.name: scores[i]})]
        elif isinstance(scores[i], dict):
            # parse dictionary into list if getting dict back
            values = [Value(text, self, {self.name + '_' + k: v})
                      for k, v in scores[i].items()]
        event = Event(onset=w.onset, duration=w.duration, values=values)
        events.append(event)
    return events
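# A standalone sketch (not part of the extractor) of the dict-flattening step
# above: each key becomes its own feature under '<name>_<key>'. The name
# 'sentiment' and the score values are made up for illustration.
score = {'pos': 0.7, 'neg': 0.1}
features = {'sentiment_%s' % k: v for k, v in score.items()}
assert features == {'sentiment_pos': 0.7, 'sentiment_neg': 0.1}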
def apply(self, stim):
    data = self._stft(stim)
    events = []
    time_bins = np.arange(0., stim.duration - self.frame_size, self.hop_size)
    # If freq_bins is an int, split the spectrum into that many equal bins
    # (integer division keeps the bin edges usable as slice indices)
    if isinstance(self.freq_bins, int):
        n_bins = self.freq_bins
        bin_size = data.shape[1] // n_bins
        self.freq_bins = [(i * bin_size, (i + 1) * bin_size)
                          for i in range(n_bins)]
    for i, tb in enumerate(time_bins):
        ev = Event(onset=tb, duration=self.frame_size)
        value_data = {}
        for fb in self.freq_bins:
            label = '%d_%d' % fb
            start, stop = fb
            val = data[i, start:stop].mean()
            if np.isinf(val):
                val = 0.
            value_data[label] = val
        ev.add_value(Value(stim, self, value_data))
        events.append(ev)
    return events
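# Standalone sketch of the integer frequency-binning above: 10 spectral
# columns split into 3 equal bins, averaging within each. The data and bin
# count are made up; note the trailing remainder column is dropped.
import numpy as np
data = np.arange(20.).reshape(2, 10)  # (time_bins, freq_columns)
n_bins = 3
bin_size = data.shape[1] // n_bins
bins = [(i * bin_size, (i + 1) * bin_size) for i in range(n_bins)]
# bins == [(0, 3), (3, 6), (6, 9)]
means = {'%d_%d' % b: data[0, b[0]:b[1]].mean() for b in bins}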
def apply(self, stim, show=False):
    events = []
    for i, f in enumerate(stim):
        img = cv2.cvtColor(f.data, cv2.COLOR_BGR2GRAY)
        if i == 0:
            # No previous frame yet; flow for the first frame is computed
            # against itself and sums to ~0
            last_frame = img
        # Dense Farneback optical flow (the None is the output-flow argument
        # required by the OpenCV 3+ signature)
        flow = cv2.calcOpticalFlowFarneback(
            last_frame, img, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        flow = np.sqrt((flow ** 2).sum(2))  # per-pixel flow magnitude
        if show:
            cv2.imshow('frame', flow.astype('int8'))
            cv2.waitKey(1)
        last_frame = img
        total_flow = flow.sum()
        value = Value(stim, self, {'total_flow': total_flow})
        event = Event(onset=f.onset, duration=f.duration, values=[value])
        events.append(event)
    return events
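# Standalone sketch of the flow-magnitude computation above on two synthetic
# grayscale frames (assumes OpenCV 3+; the frame contents are made up):
# a white square shifted two pixels to the right between frames.
import cv2
import numpy as np
prev = np.zeros((64, 64), dtype=np.uint8)
prev[24:40, 24:40] = 255
curr = np.roll(prev, 2, axis=1)
flow = cv2.calcOpticalFlowFarneback(prev, curr, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
total_flow = np.sqrt((flow ** 2).sum(2)).sum()  # summed per-pixel magnitude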
def apply(self, stim):
    events = []
    time_bins = np.arange(0., stim.duration, 1.)
    for i, tb in enumerate(time_bins):
        # One event per second; duration is in the same units as onset
        ev = Event(onset=tb, duration=1.)
        ev.add_value(Value(stim, self, {'second': i}))
        events.append(ev)
    return events
def apply(self, stim):
    # Taken from
    # http://stackoverflow.com/questions/7765810/is-there-a-way-to-detect-if-an-image-is-blurry?lq=1
    data = stim.data
    gray_image = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY)
    sharpness = np.max(
        cv2.convertScaleAbs(cv2.Laplacian(gray_image, 3))) / 255.0
    return Value(stim, self, {'sharpness': sharpness})
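# Standalone check of the Laplacian sharpness metric above on a synthetic
# image: a hard vertical edge scores 1.0, and blurring the same image
# lowers the score.
import cv2
import numpy as np
img = np.zeros((64, 64), dtype=np.uint8)
img[:, 32:] = 255
sharp = np.max(cv2.convertScaleAbs(cv2.Laplacian(img, 3))) / 255.0   # 1.0
blurred = cv2.GaussianBlur(img, (15, 15), 0)
less_sharp = np.max(cv2.convertScaleAbs(cv2.Laplacian(blurred, 3))) / 255.0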
def apply(self, img):
    data = img.data
    # Write the image to a temporary PNG the tagger API can read
    temp_file = tempfile.mktemp() + '.png'
    imsave(temp_file, data)
    with open(temp_file, 'rb') as fp:
        tags = self.tagger.tag_images(fp, select_classes=self.select_classes)
    os.remove(temp_file)
    return Value(img, self, {'tags': tags})
def test_dummy_code_timeline(self):
    data = [{'A': 12.0, 'B': 'abc'},
            {'A': 7, 'B': 'def'},
            {'C': 40}]
    events = [Event(values=[Value(None, None, x)], duration=1) for x in data]
    tl = Timeline(events=events, period=1)
    self.assertEqual(tl.to_df().shape, (5, 4))
    tl_dummy = tl.dummy_code()
    self.assertEqual(tl_dummy.to_df().shape, (7, 4))
    tl = Timeline(events=events, period=1)
    tl_dummy = tl.dummy_code(string_only=False)
    self.assertEqual(tl_dummy.to_df().shape, (9, 4))
def apply(self, stim):
    # pySaliencyMap from https://github.com/akisato-/pySaliencyMap
    # Initialize variables
    h, w, c = stim.data.shape
    sm = pySaliencyMap.pySaliencyMap(h, w)
    # Compute saliency maps and store full maps as derivatives
    stim.derivatives = dict()
    stim.derivatives['saliency_map'] = sm.SMGetSM(stim.data)
    # thresholding done using Otsu
    stim.derivatives['binarized_map'] = sm.SMGetBinarizedSM(stim.data)
    # Compute summary statistics
    output = {}
    output['max_saliency'] = np.max(stim.derivatives['saliency_map'])
    output['max_y'], output['max_x'] = [
        list(i)[0] for i in
        np.where(stim.derivatives['saliency_map'] == output['max_saliency'])]
    output['frac_high_saliency'] = np.sum(
        stim.derivatives['binarized_map'] / 255.0) / (h * w)
    return Value(stim, self, output)
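# Hedged usage sketch of the saliency calls above, outside the extractor
# (assumes the pySaliencyMap module from the linked repo is importable and
# 'frame.png' is a hypothetical BGR image on disk).
import cv2
import numpy as np
import pySaliencyMap
frame = cv2.imread('frame.png')
h, w, c = frame.shape
sm = pySaliencyMap.pySaliencyMap(h, w)
saliency_map = sm.SMGetSM(frame)  # float saliency map
frac_high = np.sum(sm.SMGetBinarizedSM(frame) / 255.0) / (h * w)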
def apply(self, stim):
    amps = stim.data
    sampling_rate = stim.sampling_rate
    elements = stim.transcription.elements
    events = []
    for el in elements:
        # Convert onset/duration (seconds) to sample indices
        onset = sampling_rate * el.onset
        duration = sampling_rate * el.duration
        r_onset = np.round(onset).astype(int)
        r_offset = np.round(onset + duration).astype(int)
        if not r_offset <= amps.shape[0]:
            raise Exception('Block ends after data.')
        mean_amplitude = np.mean(amps[r_onset:r_offset])
        amplitude_data = {'mean_amplitude': mean_amplitude}
        ev = Event(onset=onset, duration=duration)
        ev.add_value(Value(stim, self, amplitude_data))
        events.append(ev)
    return events
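# Standalone sketch of the seconds-to-samples conversion above, with made-up
# numbers: a word starting at 1.5 s and lasting 0.25 s in a 100 Hz amplitude
# track maps to samples 150..175.
import numpy as np
sampling_rate = 100
amps = np.random.rand(500)
onset = sampling_rate * 1.5
duration = sampling_rate * 0.25
r_onset = int(np.round(onset))              # 150
r_offset = int(np.round(onset + duration))  # 175
mean_amplitude = amps[r_onset:r_offset].mean()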
def apply(self, audio):
    with sr.AudioFile(audio.filename) as source:
        clip = self.recognizer.record(source)
    text = self.recognizer.recognize_wit(clip, self.api_key)
    # Attach the transcription to the audio stim rather than the raw string
    return Value(audio, self, {'text': text})
def extract(self, extractors):
    vals = {}
    for e in extractors:
        vals[e.name] = e.apply(self)
    # The combined result spans multiple extractors, so no single extractor
    # is attached to the aggregate Value
    return Value(self, None, vals)
def apply(self, img):
    data = img.data
    # Mean per-pixel variance across color channels
    avg_color = np.var(data, 2).mean()
    return Value(img, self, {'avg_color': avg_color})
def apply(self, img):
    data = img.data
    # Mean per-pixel channel maximum (the HSV value channel), scaled to [0, 1]
    avg_brightness = np.amax(data, 2).mean() / 255.0
    return Value(img, self, {'avg_brightness': avg_brightness})
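# Standalone check of the brightness measure above (synthetic pixel data):
# the per-pixel channel maximum is the HSV "value" channel, so the score is
# the mean V scaled to [0, 1].
import numpy as np
data = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
avg_brightness = np.amax(data, 2).mean() / 255.0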
def apply(self, stim):
    return Value(stim, self, {'constant': 1})
def apply(self, stim):
    data = stim.data
    # Mean per-pixel variance across color channels
    avg_color = np.var(data, 2).mean()
    return Value(stim, self, {'avg_color': avg_color})
def apply(self, stim):
    data = stim.data
    # Mean per-pixel channel maximum (the HSV value channel), scaled to [0, 1]
    avg_brightness = np.amax(data, 2).mean() / 255.0
    return Value(stim, self, {'avg_brightness': avg_brightness})