def start_dreaming(self, frame, style_image='ImagesIn/elephants2.jpg'):
    """Render one dream/style-transfer step for *frame*, cycling layers.

    Each call targets the next entry of a fixed list of GoogLeNet
    "reduce" layers, extracts style activations from *style_image* and
    subject activations from the filtered input frame, then asks the
    style engine for the next rendered frame.

    Args:
        frame: input image; passed through ``self.input_filter`` before
            activations are extracted.
        style_image: path of the style reference picture. Defaults to
            the previously hard-coded elephants photo, so existing
            callers keep their behavior.

    Returns:
        The visualisation image produced by ``self.stl.next_frame``.
    """
    layers = [
        'inception_3b/5x5_reduce',
        'inception_4a/5x5_reduce',
        'inception_4c/3x3_reduce',
        'inception_5b/3x3_reduce',
    ]
    self.layer = layers[self.__i_layer]
    # Advance cyclically; modulo replaces the manual wrap-around check.
    self.__i_layer = (self.__i_layer + 1) % len(layers)
    self.style_data = gd.get_layers_data(self.net, style_image, self.layer)
    self.subject_data = gd.get_layers_data_image(
        self.net, self.input_filter(frame), self.layer)
    self.stl.setup_style_iterator(self.iterator[0])
    vis = self.stl.next_frame(
        self.net, self.style_data, self.subject_data, self.layer)
    return vis
def start_dreaming(self, frame):
    """Produce the next dreamed frame, rotating the target layer.

    Steps ``self.__i_layer`` through a fixed cycle of GoogLeNet
    "reduce" layers, pulls style activations from the reference
    picture and subject activations from the filtered input frame,
    then renders one iteration of the style transfer.
    """
    layer_cycle = (
        'inception_3b/5x5_reduce',
        'inception_4a/5x5_reduce',
        'inception_4c/3x3_reduce',
        'inception_5b/3x3_reduce',
    )
    current = layer_cycle[self.__i_layer]
    self.layer = current
    self.__i_layer += 1
    if self.__i_layer >= len(layer_cycle):
        self.__i_layer = 0
    self.style_data = gd.get_layers_data(
        self.net, 'ImagesIn/elephants2.jpg', current)
    filtered = self.input_filter(frame)
    self.subject_data = gd.get_layers_data_image(self.net, filtered, current)
    self.stl.setup_style_iterator(self.iterator[0])
    return self.stl.next_frame(
        self.net, self.style_data, self.subject_data, current)
'end_sigma': 0.0, 'start_step_size': 6.0, 'end_step_size': 1.0 }, ] layer = 'inception_4b/output' net = ml.NetModels.setup_googlenet_model('../CommonCaffe/TrainedModels/') style_data = gd.get_layers_data(net, 'ImagesIn/elephants2.jpg', layer) img = cv2.imread('Paintings/wondering.jpg') img = images.Images.resize_image(480, 640, img) dreamer.Dreamer.input_filter(img) subject_data = gd.get_layers_data_image(net, img, layer) stl = dream_styles.Styles() stl.setup_style_iterator(iterator[0]) for i in range(0, iterator[0]['iter_n']): vis = stl.next_frame(net, style_data, subject_data, layer) cv2.imshow('Video', vis) cv2.waitKey(1) vis = cv2.cvtColor(vis, cv2.COLOR_RGB2BGR) display.Display().showResultCV(vis)
{ 'iter_n': 40, 'start_sigma': 1.5, 'end_sigma': 0.0, 'start_step_size': 6.0, 'end_step_size': 1.0 }, ] layer = 'inception_4b/output' net = ml.NetModels.setup_googlenet_model('../CommonCaffe/TrainedModels/') style_data = gd.get_layers_data(net, 'ImagesIn/elephants2.jpg', layer) img = cv2.imread('Paintings/wondering.jpg') img = images.Images.resize_image(480, 640, img) dreamer.Dreamer.input_filter(img) subject_data = gd.get_layers_data_image(net, img, layer) stl = dream_styles.Styles() stl.setup_style_iterator(iterator[0]) for i in range(0, iterator[0]['iter_n']): vis = stl.next_frame(net, style_data, subject_data, layer) cv2.imshow('Video', vis) cv2.waitKey(1) vis = cv2.cvtColor(vis, cv2.COLOR_RGB2BGR) display.Display().showResultCV(vis)