class SaliencyNode():
    def __init__(self):
        tensorflow_path = rospy.get_param(
            "tensorflow_path",
            os.path.expanduser('~') +
            "/.opt/tensorflow_venv/lib/python2.7/site-packages")
        use_gpu = rospy.get_param('use_gpu', 0)
        network_input_height = float(
            rospy.get_param('network_input_height', '240'))
        network_input_width = float(
            rospy.get_param('network_input_width', '320'))
        # default must be a real bool: bool('False') would evaluate to True
        clip = bool(rospy.get_param('~clip', False))

        self.saliency = Saliency(tensorflow_path, use_gpu,
                                 network_input_height, network_input_width,
                                 clip)

        image_sub = rospy.Subscriber("/rgb/image_raw",
                                     Image,
                                     self.image_callback,
                                     queue_size=1,
                                     buff_size=2**24)
        self.saliency_map_pub = rospy.Publisher("/saliency_map",
                                                Float32MultiArray,
                                                queue_size=1)
        self.saliency_map_image_pub = rospy.Publisher("/saliency_map_image",
                                                      Image,
                                                      queue_size=1)
        self.cv_bridge = CvBridge()

    def __del__(self):
        self.sess.close()

    def image_callback(self, image):
        try:
            image = self.cv_bridge.imgmsg_to_cv2(image, "bgr8")
        except CvBridgeError as e:
            print(e)

        saliency_map = self.saliency.compute_saliency_map(image)

        # publish the raw saliency map as a 2D Float32MultiArray
        height = MultiArrayDimension(size=len(saliency_map))
        width = MultiArrayDimension(size=len(saliency_map[0]))
        lo = MultiArrayLayout([height, width], 0)
        self.saliency_map_pub.publish(
            Float32MultiArray(layout=lo, data=saliency_map.flatten()))

        # publish a greyscale image version of the saliency map
        try:
            saliency_map_image = self.cv_bridge.cv2_to_imgmsg(
                np.uint8(saliency_map * 255.), "mono8")
        except CvBridgeError as e:
            print(e)
        self.saliency_map_image_pub.publish(saliency_map_image)
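# A minimal entry-point sketch for the node above (not part of the original
# excerpt); the node name "saliency" is an assumption.
if __name__ == "__main__":
    rospy.init_node("saliency")
    node = SaliencyNode()
    rospy.spin()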
                    help='number of SALICON images used to compute the fitness')
parser.add_argument('--n-iter', type=int, default=50,
                    help='number of CMAES iterations')
parser.add_argument('--seed', type=int, default=10, help='random seed')

args = parser.parse_args()

try:
    os.makedirs(args.out)
except OSError as e:
    print(e)

model_file = args.model
saliency = Saliency(model_file=model_file)

fixation = sio.loadmat(args.fixations)
all_user_fixations = [
    len(fixation['gaze'][i][0][2]) for i in range(len(fixation['gaze']))
]
time_limit = fixation["gaze"][0][0][1][-1][0]
gt_mean_rate = np.mean(all_user_fixations) * 1000. / time_limit

path_to_image = os.path.join(args.images, fixation['image'][0] + '.jpg')
print(
    "Ground truth image {} has mean saccade rate: {}, std: {} ({} participants)"
    .format(path_to_image, gt_mean_rate, np.std(all_user_fixations),
            len(fixation['gaze'])))

image = misc.imread(path_to_image)
saliency_map = saliency.compute_saliency_map(image)

dt = 5.
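# Worked example of the gt_mean_rate formula above, with hypothetical numbers:
# the mean fixation count per participant is scaled from the viewing time
# (given in ms) to a per-second saccade rate.
example_counts = [12, 9, 15]   # hypothetical fixations per participant
example_time_limit = 5000.     # hypothetical viewing time in ms
example_rate = np.mean(example_counts) * 1000. / example_time_limit  # -> 2.4 Hz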
from cv_bridge import CvBridge
from attention import Saliency
from std_msgs.msg import Float32MultiArray
from image_geometry import StereoCameraModel
from embodied_attention.srv import Transform

tensorflow_path = rospy.get_param(
    "tensorflow_path", "/opt/tensorflow_venv/lib/python2.7/site-packages")
model_file = rospy.get_param('saliency_file', '/tmp/model.ckpt')
network_input_height = float(rospy.get_param('network_input_height', '192'))
network_input_width = float(rospy.get_param('network_input_width', '256'))

@nrp.MapVariable("saliency",
                 initial_value=Saliency(tensorflow_path, model_file,
                                        network_input_height,
                                        network_input_width, False))
@nrp.MapVariable("saliency_pub",
                 initial_value=rospy.Publisher("/saliency_map",
                                               Float32MultiArray,
                                               queue_size=1))
@nrp.MapVariable("saliency_image_pub",
                 initial_value=rospy.Publisher("/saliency_map_image",
                                               Image,
                                               queue_size=1))
@nrp.MapVariable("bridge", initial_value=CvBridge())
@nrp.MapVariable("points", initial_value=[], scope=nrp.GLOBAL)
@nrp.MapVariable("camera_model", initial_value=StereoCameraModel())
@nrp.MapVariable("camera_info_left", initial_value=None, scope=nrp.GLOBAL)
@nrp.MapVariable("camera_info_right", initial_value=None, scope=nrp.GLOBAL)
@nrp.MapVariable("disparity_image", initial_value=None, scope=nrp.GLOBAL)
import rospy
from sensor_msgs.msg import Image
from geometry_msgs.msg import PointStamped
from cv_bridge import CvBridge
from attention import Saliency
from std_msgs.msg import Float32MultiArray
from image_geometry import PinholeCameraModel
from embodied_attention.srv import Transform

tensorflow_path = rospy.get_param(
    "tensorflow_path", "/opt/tensorflow_venv/lib/python2.7/site-packages")
model_file = rospy.get_param('saliency_file', '/tmp/model.ckpt')
network_input_height = float(rospy.get_param('network_input_height', '192'))
network_input_width = float(rospy.get_param('network_input_width', '256'))

@nrp.MapVariable("saliency",
                 initial_value=Saliency(tensorflow_path, model_file,
                                        network_input_height,
                                        network_input_width, False))
@nrp.MapVariable("saliency_pub",
                 initial_value=rospy.Publisher("/saliency_map",
                                               Float32MultiArray,
                                               queue_size=1))
@nrp.MapVariable("saliency_image_pub",
                 initial_value=rospy.Publisher("/saliency_map_image",
                                               Image,
                                               queue_size=1))
@nrp.MapVariable("bridge", initial_value=CvBridge())
@nrp.MapVariable("points", initial_value=[], scope=nrp.GLOBAL)
@nrp.MapVariable("camera_model", initial_value=PinholeCameraModel())
@nrp.MapVariable("camera_info_left", initial_value=None, scope=nrp.GLOBAL)
@nrp.MapVariable("last_time", initial_value=None)
@nrp.MapVariable("elapsed", initial_value=0)
@nrp.MapVariable("pan", initial_value=0, scope=nrp.GLOBAL)
@nrp.MapVariable("tilt", initial_value=0, scope=nrp.GLOBAL)
@nrp.MapVariable("saliency_map", initial_value=None)
@nrp.MapRobotSubscriber("image",
                        Topic('/icub_model/left_eye_camera/image_raw',
                              sensor_msgs.msg.Image))
def image_to_saliency(t, image, bridge, saliency, saliency_pub,
                      saliency_image_pub, points, camera_model,
                      camera_info_left, last_time, elapsed, pan, tilt,
                      saliency_map):
    if image.value is None or camera_info_left.value is None:
        return
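    # The original excerpt ends here. A hedged sketch of how the body could
    # continue, mirroring SaliencyNode.image_callback above; these lines are
    # not taken from the original source and are left commented out:
    # frame = bridge.value.imgmsg_to_cv2(image.value, "bgr8")
    # saliency_map.value = saliency.value.compute_saliency_map(frame)
    # saliency_pub.value.publish(Float32MultiArray(data=saliency_map.value.flatten()))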
                    help='path to the input image')
parser.add_argument('--rf_modulation_type', type=str, default='none',
                    help='type of receptive field modulation')
parser.add_argument('--seed', type=int, default=10, help='random seed')

args = parser.parse_args()
np.random.seed(args.seed)

try:
    os.makedirs(args.out)
except OSError as e:
    print(e)

# demonstration of saliency model
saliency = Saliency(use_gpu=args.gpu)
image = cv.imread(args.image, 1)
saliency_map = saliency.compute_saliency_map(image)
# plt.imsave(path.join(args.out, 'saliency.png'), saliency_map, cmap=plt.get_cmap('gray'))

# demonstration of saccade model
def plot_targets(all_targets, all_times, saliency_map, out):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.imshow(saliency_map,
              vmin=0,
              vmax=1.,
              aspect='equal',
              interpolation='none',
              cmap=plt.get_cmap('gray'),
              origin='upper')
    ax.set_axis_off()
parser.add_argument(
    '--rf-radius-score',
    type=int,
    default=5,
    help='radius of the circle used to compute the score, in saliency-map pixels')
parser.add_argument('--seed', type=int, default=10, help='random seed')

args = parser.parse_args()

try:
    os.makedirs(args.out)
except OSError as e:
    print(e)

saliency = Saliency(use_gpu=args.gpu)
images = [cv.imread(im, 1) for im in args.image]
saliency_maps = [saliency.compute_saliency_map(im) for im in images]
for (i, sal) in enumerate(saliency_maps):
    plt.imsave(path.join(args.out, 'saliency_{}.png'.format(i)),
               sal,
               cmap=plt.get_cmap('gray'))

dt = 5.

# ordered list of params to optimize
saccade_params = OrderedDict([('sig_lat', .1), ('sig_rf', 0.267),
                              ('sig_IoR', .1), ('amp_lat', .001),
                              ('amp_rf', 0.008), ('amp_IoR', 1.5),
                              ('amp_noise', .09), ('k', .017), ('g', .33),
                              ('theta', 6.), ('tau', 50.)])
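# A minimal sketch (names illustrative, not from the original script) of how an
# OrderedDict like saccade_params maps to and from the flat vector a CMA-ES
# optimizer works on: the dict fixes the parameter order, so the conversion is
# just a zip over keys and values.
def params_to_vector(params):
    """Flatten the ordered parameter dict into a plain list for the optimizer."""
    return list(params.values())

def vector_to_params(vector, template=saccade_params):
    """Rebuild an ordered parameter dict from an optimizer-proposed vector."""
    return OrderedDict(zip(template.keys(), vector))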