Code example #1
File: ros_service_api.py Project: Zhenyu-Li/apollo
    def execute_cmd(cmd_name):
        """Run ros command by sending GRPC requests and return HTTP response."""
        ToolStatus = runtime_status_pb2.ToolStatus
        channel = grpc.insecure_channel(gflags.FLAGS.hmi_ros_node_service)
        stub = ros_node_pb2.HMIRosNodeStub(channel)

        response = None
        status = runtime_status.RuntimeStatus
        if cmd_name == 'reset':
            request = ros_node_pb2.ChangeDrivingModeRequest(
                action=ros_node_pb2.ChangeDrivingModeRequest.RESET_TO_MANUAL)
            response = stub.ChangeDrivingMode(request)

            # Update runtime status.
            tool_status = status.get_tools()
            if tool_status.playing_status != ToolStatus.PLAYING_NOT_READY:
                tool_status.playing_status = ToolStatus.PLAYING_READY_TO_CHECK

        elif cmd_name == 'start_auto_driving':
            request = ros_node_pb2.ChangeDrivingModeRequest(
                action=ros_node_pb2.ChangeDrivingModeRequest.START_TO_AUTO)
            response = stub.ChangeDrivingMode(request)

            # Update runtime status.
            status.get_tools().playing_status = ToolStatus.PLAYING
        else:
            error_msg = 'RosServiceApi: Unknown command "{}"'.format(cmd_name)
            glog.error(error_msg)
            return error_msg, httplib.BAD_REQUEST

        status.broadcast_status_if_changed()
        glog.info('Processed command "{}", got response: {}'.format(
            cmd_name, response))
        return 'OK', httplib.OK
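
For reference, a minimal sketch of the imports this snippet relies on; the project-specific module paths (ros_node_pb2, runtime_status_pb2, runtime_status) are inferred from usage and may not match the actual Apollo tree:

import gflags
import glog
import grpc
import httplib  # Python 2; http.client on Python 3

import ros_node_pb2        # assumed: generated proto/gRPC stubs
import runtime_status_pb2  # assumed: generated runtime-status protos
import runtime_status      # assumed: RuntimeStatus helper module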
Code example #2
File: feature.py Project: ih4cku/hmm-cnn-rnn
def train(split_name):
    glog.info('training PCA')
    split = config.splits[split_name]
    frames_info = load_frames_info(split)

    # select frames
    all_frames = []
    for samp_id in frames_info:
        samp_frames = [path.join(split.frames_dir, samp_id, frm) for frm in frames_info[samp_id]['frames']]
        all_frames.extend(samp_frames)
    sel_frame_pths = random.sample(all_frames, min(config.NUM_FRAMES_TRAIN, len(all_frames)))

    # read frames data
    sel_frames_data = load_frames(sel_frame_pths)

    # train PCA
    pca = PCA(n_components=0.99)
    pca.fit(sel_frames_data)

    # dump PCA
    pca_dump_dir = path.split(config.PCA_DUMP_PTH)[0]
    if not path.isdir(pca_dump_dir):
        os.makedirs(pca_dump_dir)
    joblib.dump(pca, config.PCA_DUMP_PTH)
    glog.info('PCA dumped to %s', config.PCA_DUMP_PTH)
Code example #3
 def launch_jobs(self):
     if self.isV2:
         self.launch_jobs_v2()
         return
     argList = []
     for n in range(self.numIm_):
         argList.append([])
     self.labels_ = np.zeros((self.param_.batch_size, self.lblSz_, 1, 1), np.float32)
     # Form the list of images and labels
     for b in range(self.param_.batch_size):
         if self.wfid_.is_eof():
             self.wfid_.close()
             self.wfid_ = mpio.GenericWindowReader(self.param_.source)
             glog.info("RESTARTING READ WINDOW FILE")
         imNames, lbls = self.wfid_.read_next()
         self.labels_[b, :, :, :] = lbls.reshape(self.lblSz_, 1, 1).astype(np.float32)
         # Read images
         for n in range(self.numIm_):
             fName, ch, h, w, x1, y1, x2, y2 = imNames[n].strip().split()
             fName = osp.join(self.param_.root_folder, fName)
             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
             argList[n].append([fName, (x1, y1, x2, y2), self.param_.crop_size, b, self.param_.is_gray])
     # Launch the jobs
     for n in range(self.numIm_):
         try:
             print (argList[n])
             self.jobs_[n] = self.pool_[n].map_async(self.readfn_, argList[n])
         except KeyboardInterrupt:
             print "Keyboard Interrupt received - terminating in launch jobs"
             self.pool_[n].terminate()
Code example #4
File: feature.py Project: ih4cku/hmm-cnn-rnn
def prepare_hmm_data(split_name):
    split = config.splits[split_name]
    frames_info = load_frames_info(split)
    if not path.isdir(split.hmm_data_dir):
        os.makedirs(split.hmm_data_dir)

    # text
    text_lines = []
    for samp_id in frames_info:
        text_lines.append(samp_id+' '+' '.join(frames_info[samp_id]['trans'])+'\n')
    with open(path.join(split.hmm_data_dir, 'text'), 'w') as f:
        f.writelines(text_lines)

    # utt2spk
    with open(path.join(split.hmm_data_dir, 'utt2spk'), 'w') as f:
        f.writelines([samp_id+' global\n' for samp_id in frames_info])
    # spk2utt
    with open(path.join(split.hmm_data_dir, 'spk2utt'), 'w') as f:
        f.write(' '.join(['global'] + list(frames_info.keys())) + '\n')

    # write scp, ark
    pca_features = joblib.load(split.pca_feature_pth)
    ark_fn = path.join(split.hmm_data_dir, 'feats.ark')
    scp_fn = path.join(split.hmm_data_dir, 'feats.scp')
    with kaldi_io.Float32MatrixWriter('ark,scp:%s,%s' % (ark_fn, scp_fn)) as w:
        for samp_id in pca_features:
            w[samp_id] = pca_features[samp_id]
    glog.info('%s hmm data done.', split_name)
Code example #5
File: extract_images.py Project: GeoffGao/apollo
def extract_images(bag, dst_dir, args):
    """Extract images to the destination dir."""
    time_nsecs = []
    pre_time_sec = 0
    bridge = cv_bridge.CvBridge()
    seq = 0
    for _, msg, t in bag.read_messages(topics=args.topic):
        # Check timestamp.
        cur_time_sec = msg.header.stamp.to_sec()
        if cur_time_sec - pre_time_sec < args.extract_rate:
            continue
        pre_time_sec = cur_time_sec
        time_nsecs.append(msg.header.stamp.to_nsec())

        # Save image.
        seq += 1
        msg.encoding = 'yuv422'
        img = bridge.imgmsg_to_cv2(msg, 'yuv422')
        img = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_YUYV)
        img_file = os.path.join(dst_dir, '{}.jpg'.format(seq))
        cv2.imwrite(img_file, img)

        glog.info('#{}: header.seq={}, header.stamp={}, saved as {}'.format(
            seq, msg.header.seq, cur_time_sec, img_file))
    return time_nsecs
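
A minimal sketch of the imports this snippet assumes, all inferred from usage:

import os
import cv2
import cv_bridge
import glog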
Code example #6
File: hmi_status_helper.py Project: Zhenyu-Li/apollo
 def report_status(status):
     """Report status to HMI."""
     json_dict = json_format.MessageToDict(status, False, True)
     try:
         req = requests.post(
             gflags.FLAGS.hmi_runtime_status_api, json=json_dict)
         glog.info('Put RuntimeStatus: {}'.format(req.json()))
     except Exception as e:
         glog.error('Failed to put RuntimeStatus: {}'.format(e))
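
A minimal sketch of the imports this snippet assumes, inferred from usage; json_format is the standard protobuf JSON helper:

import gflags
import glog
import requests
from google.protobuf import json_format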
Code example #7
File: system_cmd.py Project: Zhenyu-Li/apollo
def run_in_background(cmd, stdout_file, stderr_file):
    """Run command in background."""
    stdout_fd = open(config.Config.get_realpath(stdout_file), 'w')
    # Reuse the fd if it's the same file, such as the default '/dev/null'.
    stderr_fd = stdout_fd if stderr_file == stdout_file else open(
        config.Config.get_realpath(stderr_file), 'w')

    glog.info('Run command in background: {}'.format(cmd))
    subprocess.Popen(
        cmd, shell=True, stdout=stdout_fd, stderr=stderr_fd, close_fds=True)
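
A hypothetical call for illustration; the command and bag name are made up, and '/dev/null' is the default mentioned in the comment above:

# Hypothetical usage: play a bag in the background, discarding all output.
run_in_background('rosbag play demo.bag', '/dev/null', '/dev/null')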
Code example #8
File: dump_gpsbin.py Project: GeoffGao/apollo
def process_record_file(args):
    """Read record file and extract the message with specified channels"""
    freader = record.RecordReader(args.input_file)
    glog.info('#processing record file {}'.format(args.input_file))
    time.sleep(1)
    output_file = os.path.join(args.output_dir, 'gpsimu.bin')
    with open(output_file, 'wb') as outfile:
        for channel, message, _type, _timestamp in freader.read_messages():
            if channel == args.gps_raw_data_channel:
                raw_data = RawData()
                raw_data.ParseFromString(message)
                outfile.write(raw_data.data)
Code example #9
File: hmi_status_helper.py Project: Zhenyu-Li/apollo
    def report_hardware_status(hardware_status_list):
        """Report hardware status to HMI."""
        status_pb = runtime_status_pb2.RuntimeStatus()
        for hardware_status in hardware_status_list:
            status_pb.hardware.add().MergeFrom(hardware_status)

        json_dict = json_format.MessageToDict(status_pb, False, True)
        try:
            req = requests.post(
                gflags.FLAGS.hmi_runtime_status_api, json=json_dict)
            glog.info('Put HardwareStatus: {}'.format(req.json()))
        except Exception as e:
            glog.error('Failed to put HardwareStatus: {}'.format(e))
Code example #10
File: import_record.py Project: GeoffGao/apollo
    def Import(record_file):
        """Import one record."""
        parser = RecordParser(record_file)
        if not parser.ParseMeta():
            glog.error('Failed to parse record {}'.format(record_file))
            return

        parser.ParseMessages()
        doc = Mongo.pb_to_doc(parser.record)

        collection = Mongo.collection(gflags.FLAGS.mongo_collection_name)
        collection.replace_one({'path': parser.record.path}, doc, upsert=True)
        glog.info('Imported record {}'.format(record_file))
Code example #11
	def forward(self, bottom, top):
		t1 = time.time()
		tDiff = t1 - self.t_
		#Load the images
		self.get_prefetch_data()
		top[0].data[...] = self.imData_
		t2 = time.time()
		tFetch = t2-t1
		#Read the labels
		top[1].data[:,:,:,:] = self.labels_
		self.launch_jobs()
		t2 = time.time()
		#print ('Forward took %fs in PythonWindowDataParallelLayer' % (t2-t1))
		glog.info('Prev: %f, fetch: %f, forward: %f' % (tDiff, tFetch, t2 - t1))
		self.t_ = time.time()
Code example #12
    def _cluster_v2(prediction):
        """
        dbscan cluster
        :param prediction:
        :return:
        """
        db = DBSCAN(eps=0.7, min_samples=200).fit(prediction)
        db_labels = db.labels_
        unique_labels = np.unique(db_labels)
        unique_labels = [tmp for tmp in unique_labels if tmp != -1]
        log.info('Number of clusters: {:d}'.format(len(unique_labels)))

        num_clusters = len(unique_labels)
        cluster_centers = db.components_

        return num_clusters, db_labels, cluster_centers
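
As a self-contained illustration of the same sklearn call, a sketch on synthetic 2-D points; eps and min_samples are scaled down for the toy data (the values above are tuned for lane-embedding clustering):

import numpy as np
from sklearn.cluster import DBSCAN

# Two tight blobs plus one far-away outlier.
rng = np.random.RandomState(0)
points = np.vstack([rng.normal(0.0, 0.1, (50, 2)),
                    rng.normal(5.0, 0.1, (50, 2)),
                    [[10.0, 10.0]]])
db = DBSCAN(eps=0.5, min_samples=5).fit(points)
labels = db.labels_                  # -1 marks noise points
num_clusters = len(set(labels) - {-1})
print(num_clusters)                  # -> 2; the outlier is labeled -1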
Code example #13
File: populate_frames.py Project: GeoffGao/apollo
    def construct_current_frame(self, message):
        """Construct the current frame to make it ready for dumping."""
        if self._gps_pose._position is None:
            return
        for sensor in g_channel_process_map.values():
            sensor.construct_frame(self._current_frame)
        self._frame_count += 1
        point_cloud = PointCloud()
        point_cloud.ParseFromString(message)
        self._current_frame.timestamp = point_cloud.header.timestamp_sec
        self._current_frame.frame_seq = \
            self._skipped_frame_count + self._frame_count
        self._current_frame.data_file = os.path.basename(self._args.input_file)
        glog.info('#dumping frame {:d}: {:.7f}'.format(
            self._frame_count, self._current_frame.timestamp))
        self.dump_to_json_file()
        self._current_frame = frame_pb2.Frame()
Code example #14
File: gnet.py Project: cnsoft/GGSvr
def on_sub_server_login(data):
    global SUB_SERVER_MAP
    
    connection_id = data[0]
    name = data[1]
    id = data[2]

    if name not in SUB_SERVER_MAP:
        glog.error("gnet>on_sub_server_login sub_server not defined: %s" % name)
        return

    # Record this connection as a server connection
    glog.info("gnet>sub server login success: (%s : %d)" % (name, id))
    SUB_SERVER_MAP[name][id] = connection_id
Code example #15
def main(argv):
  argv = FLAGS(argv)

  loaded_mass = 7+4.0
  #loaded_mass = 0
  #observer_elevator = None

  # Test moving the Elevator
  initial_X = numpy.matrix([[0.0], [0.0]])
  up_R = numpy.matrix([[0.4572], [0.0], [0.0]])
  down_R = numpy.matrix([[0.0], [0.0], [0.0]])
  totemass = 3.54
  scenario_plotter = ScenarioPlotter()

  elevator_controller = IntegralElevator(mass=4*totemass + loaded_mass)
  observer_elevator = IntegralElevator(mass=4*totemass + loaded_mass)

  for i in xrange(0, 7):
    elevator = Elevator(mass=i*totemass + loaded_mass)
    glog.info('Actual poles are %s', str(numpy.linalg.eig(elevator.A - elevator.B * elevator_controller.K[0, 0:2])[0]))

    elevator.X = initial_X
    scenario_plotter.run_test(elevator, goal=up_R, controller_elevator=elevator_controller,
                              observer_elevator=observer_elevator, iterations=200)
    scenario_plotter.run_test(elevator, goal=down_R, controller_elevator=elevator_controller,
                              observer_elevator=observer_elevator, iterations=200)

  if FLAGS.plot:
    scenario_plotter.Plot()

  # Write the generated constants out to a file.
  if len(argv) != 5:
    glog.fatal('Expected .h file name and .cc file name for the Elevator and integral elevator.')
  else:
    design_mass = 4*totemass + loaded_mass
    elevator = Elevator("Elevator", mass=design_mass)
    loop_writer = control_loop.ControlLoopWriter("Elevator", [elevator],
                                                 namespaces=['y2015_bot3', 'control_loops', 'elevator'])
    loop_writer.Write(argv[1], argv[2])

    integral_elevator = IntegralElevator("IntegralElevator", mass=design_mass)
    integral_loop_writer = control_loop.ControlLoopWriter("IntegralElevator", [integral_elevator],
                                                          namespaces=['y2015_bot3', 'control_loops', 'elevator'])
    integral_loop_writer.Write(argv[3], argv[4])
Code example #16
File: feature.py Project: ih4cku/hmm-cnn-rnn
def extract_features(split_name):
    split = config.splits[split_name]

    # load PCA
    pca = joblib.load(config.PCA_DUMP_PTH)

    frames_info = load_frames_info(split)
    pca_features = OrderedDict()
    for samp_id in frames_info:
        samp_frames = [path.join(split.frames_dir, samp_id, frm) for frm in frames_info[samp_id]['frames']]
        raw_samp_data = load_frames(samp_frames)
        pca_samp_data = pca.transform(raw_samp_data)
        pca_features[samp_id] = pca_samp_data

    pca_feature_dir = path.split(split.pca_feature_pth)[0]
    if not path.isdir(pca_feature_dir):
        os.makedirs(pca_feature_dir)
    joblib.dump(pca_features, split.pca_feature_pth)
    glog.info('PCA features dumped to %s', split.pca_feature_pth)
Code example #17
	def launch_jobs(self):
		argList = []
		for n in range(self.numIm_):
			argList.append([])
		self.labels_ = np.zeros((self.param_.batch_size, self.lblSz_,1,1),np.float32)
		#Form the list of images and labels
		for b in range(self.param_.batch_size):
			if self.wfid_.is_eof():	
				self.wfid_.close()
				self.wfid_   = mpio.GenericWindowReader(self.param_.source)
				glog.info('RESTARTING READ WINDOW FILE')
			imNames, lbls = self.wfid_.read_next()
			self.labels_[b,:,:,:] = lbls.reshape(self.lblSz_,1,1).astype(np.float32) 
			#Read images
			fNames, coords = [], []
			for n in range(self.numIm_):
				fName, ch, h, w, x1, y1, x2, y2 = imNames[n].strip().split()
				fNames.append(osp.join(self.param_.root_folder, fName))
				# Convert sizes and coordinates from strings to ints so the
				# min/max clamps below compare numbers rather than strings.
				h, w = int(h), int(w)
				x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
				coords.append((x1, y1, x2, y2))
			#Computing jittering if required
			dx, dy = self.get_jitter(coords)
			for n in range(self.numIm_):
				fName = fNames[n]
				x1, y1, x2, y2 = coords[n]
				#Jitter the box
				x1 = max(0, x1 + dx)
				y1 = max(0, y1 + dy)
				x2 = min(w, x2 + dx)
				y2 = min(h, y2 + dy)
				#glog.info('%d, %d, %d, %d' % (x1, y1, x2, y2))
				argList[n].append([fName, (x1,y1,x2,y2), self.param_.crop_size,
									 b, self.param_.is_gray, self.param_.is_mirror])
		#Launch the jobs
		for n in range(self.numIm_):
			try:
				#print (argList[n])
				self.jobs_[n] = self.pool_[n].map_async(self.readfn_, argList[n])
			except KeyboardInterrupt:
				print('Keyboard Interrupt received - terminating in launch jobs')
				self.pool_[n].terminate()	
Code example #18
File: runtime_status.py Project: Zhenyu-Li/apollo
    def _calculate_playing_status(cls, modules_and_hardware_ready):
        """Calculate playing status."""
        ToolStatus = runtime_status_pb2.ToolStatus
        tool_status = cls.get_tools()

        playing_status = tool_status.playing_status
        if tool_status.playing_status == ToolStatus.PLAYING_NOT_READY:
            if tool_status.recording_status == ToolStatus.RECORDING_FINISHED:
                if cls.playable_duration > 0:
                    tool_status.playing_status = \
                        ToolStatus.PLAYING_READY_TO_CHECK
                else:
                    glog.info('RuntimeStatus::_calculate_playing_status: ' \
                              'No file to play')
        elif (playing_status == ToolStatus.PLAYING_CHECKING and
              modules_and_hardware_ready and tool_status.planning_ready):
            tool_status.playing_status = ToolStatus.PLAYING_READY_TO_START
            glog.info(
                'RuntimeStatus::_calculate_playing_status: ' \
                'All modules/hardware are ready')
        elif playing_status == ToolStatus.PLAYING_READY_TO_START and not (
                modules_and_hardware_ready and tool_status.planning_ready):
            tool_status.playing_status = ToolStatus.PLAYING_CHECKING
            glog.info('RuntimeStatus::_calculate_playing_status: ' \
                      'Not all modules/hardware are ready')
Code example #19
File: populate_frames.py Project: GeoffGao/apollo
 def process_record_file(self):
     """Read record file and extract the message with specified channels"""
     freader = record.RecordReader(self._args.input_file)
     time.sleep(1)
     glog.info('#processing record file {}'.format(self._args.input_file))
     for channel, message, _type, _timestamp in freader.read_messages():
         if self._frame_count >= self._args.maximum_frame_count:
             glog.info('#reached the maximum frame count, exiting now')
             return self._args.maximum_frame_count
         if channel in g_channel_process_map:
             if self._skipped_frame_count < self._args.skip_frame_count:
                 if g_channel_process_map[channel]._is_primary:
                     self._skipped_frame_count += 1
                 continue
             g_channel_process_map[channel].process(message)
             if g_channel_process_map[channel]._is_primary:
                 if not self.check_all_sensors_data():
                     g_channel_process_map[channel].clear_data()
                     self._skipped_frame_count += 1
                     continue
                 self.construct_current_frame(message)
     return self._frame_count
Code example #20
File: tool_api.py Project: Zhenyu-Li/apollo
    def get(self, tool_name):
        """Run tool command and return HTTP response as (content, status)."""
        glog.info("ToolApi receives:" + tool_name)
        if tool_name == 'setup_recording':
            if not runtime_status.RuntimeStatus.are_all_modules_ready():
                module_api.ModuleApi.execute_cmd('all', 'start')
            if not runtime_status.RuntimeStatus.are_all_hardware_ready():
                hardware_api.HardwareApi.execute_cmd('all', 'health_check')
        elif tool_name == 'reset_recording':
            file_to_play = config.Config.get_realpath(gflags.FLAGS.file_to_play)
            if os.path.exists(file_to_play):
                os.rename(file_to_play, file_to_play + '.bak')
            # Also stop player in case user has set it up.
            ToolApi._exec_bash_tool('stop_player')
            runtime_status.RuntimeStatus.get_tools().planning_ready = False
        elif tool_name == 'setup_playing':
            if not runtime_status.RuntimeStatus.are_all_modules_ready():
                module_api.ModuleApi.execute_cmd('all', 'start')
            if not runtime_status.RuntimeStatus.are_all_hardware_ready():
                hardware_api.HardwareApi.execute_cmd('all', 'health_check')
            ToolApi._exec_bash_tool('start_player')
        elif tool_name == 'start_playing':
            # The RESET command will try multiple times to change driving mode
            # to MANUAL.
            ros_service_api.RosServiceApi.execute_cmd('reset')
            ros_service_api.RosServiceApi.execute_cmd('start_auto_driving')
        elif tool_name == 'stop_playing':
            ros_service_api.RosServiceApi.execute_cmd('reset')
            ToolApi._exec_bash_tool('stop_player')
        elif tool_name == 'reset_all':
            module_api.ModuleApi.execute_cmd('all', 'stop')
            runtime_status.RuntimeStatus.reset()
            ToolApi._exec_bash_tool('stop_player')
            ToolApi._exec_bash_tool('stop_recording')
        else:
            ToolApi._exec_bash_tool(tool_name)

        ToolApi._update_runtime_status(tool_name)
        return 'OK', httplib.OK
Code example #21
File: parse_record.py Project: GeoffGao/apollo
 def ProcessChassis(self, msg):
     """Process Chassis, save disengagements."""
     chassis = Chassis()
     chassis.ParseFromString(msg)
     timestamp = chassis.header.timestamp_sec
     if self._current_driving_mode == chassis.driving_mode:
         # DrivingMode doesn't change.
         return
     # Save disengagement.
     if (self._current_driving_mode == Chassis.COMPLETE_AUTO_DRIVE and
         chassis.driving_mode == Chassis.EMERGENCY_MODE):
         glog.info('Disengagement found at {}'.format(timestamp))
         disengagement = self.record.disengagements.add(time=timestamp)
         if self._last_position is not None:
             lat, lon = utm.to_latlon(self._last_position.x,
                                      self._last_position.y,
                                      gflags.FLAGS.utm_zone_id,
                                      gflags.FLAGS.utm_zone_letter)
             disengagement.location.lat = lat
             disengagement.location.lon = lon
     # Update DrivingMode.
     self._current_driving_mode = chassis.driving_mode
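
The utm.to_latlon call above follows the standard `utm` package signature (easting, northing, zone number, zone letter). A standalone sketch with arbitrary coordinates:

import utm

# Convert UTM zone 10, band 'S' (northern hemisphere) back to lat/lon.
lat, lon = utm.to_latlon(552100.0, 4182000.0, 10, 'S')
print(lat, lon)  # roughly (37.8, -122.4), the San Francisco area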
Code example #22
def main(argv):
    db_file = None
    list_file = None
    img_path = None
    ext = ".jpg"
    if_check = False
    help_msg = "dataset_create_imagelist.py -i <lmdb> -p <image path> -o <list>\
--check\n\
-i <lmdb>           The input lmdb database file\n\
-o <list>           The output image list file\n\
-p <image path>     The path which store the downloaded images\n\
--check [optional]  Force to check if the jpg image can be loaded.\n\
                    Which will slow down the process. Default False"
    try:
        opts, args = getopt.getopt(argv, "hi:p:o:", ["check"])
    except getopt.GetoptError:
        print(help_msg)
        sys.exit(2)

    for opt, arg in opts:
        if opt == "-h":
            print(help_msg)
            sys.exit()
        elif opt == "-i":
            db_file = arg
        elif opt == "-o":
            list_file = arg
        elif opt == "-p":
            img_path = arg
        elif opt == "--check":
            if_check = True
        else:
            print(help_msg)
            sys.exit(2)
    # Check arguments
    if db_file is None or list_file is None or img_path is None:
        print(help_msg)
        sys.exit(2)

    # Check if the image path exists
    log.info("Check image path %s" % img_path)
    if not os.path.exists(img_path):
        log.fatal("Can not locate the image path %s" % img_path)
        sys.exit(2)
    # Create the text list file
    log.info("Open the image list file %s" % list_file)
    try:
        fp = open(list_file, "w")
    except IOError:
        log.fatal("Can not open %s for writing" % list_file)
        sys.exit(2)
    # open the lmdb file
    log.info("Open db file %s" % db_file)
    db = lt.open_db_ro(db_file)
    db_stat = db.stat()
    log.info("Total Entries: %d" % db_stat["entries"])
    bar = eb.EasyProgressBar()
    bar.set_end(db_stat["entries"])
    bar.start()
    counter = 0
    err_counter = 0
    # Iter the whole database
    with db.begin(write=False) as txn:
        with txn.cursor() as cur:
            for key, val in cur:
                counter += 1
                # Get the available url to download the photo
                try:
                    val_dic = yaml.load(val)
                    photo = myxml.parse_xml_to_etree(val_dic["photo"])
                    photo_id = photo.get("id")
                    focal_in35 = int(val_dic["focal_in35"])
                except:
                    err_counter += 1
                    continue
                # Filter out some erroneous values
                if focal_in35 < 5 or focal_in35 > 200:
                    continue
                # Get the image full name
                if img_path[-1] == r"/":
                    img_name = img_path + photo_id + ext
                else:
                    img_name = img_path + r"/" + photo_id + ext
                img_name = os.path.abspath(img_name)
                # Check if the image exists
                if if_check:
                    # Load the image
                    try:
                        Image.open(img_name)
                    except:
                        err_counter += 1
                        continue
                else:
                    if not os.path.exists(img_name):
                        err_counter += 1
                        continue

                # Write the info to list file
                fp.writelines(img_name + " %d\n" % focal_in35)
                bar.update(counter)
    # Finish the loop
    db.close()
    fp.close()
    bar.finish()
    log.info("Finished. errors: %d" % err_counter)
Code example #23
File: kernel.py Project: unjambonakap/chdrft
 def start(self, end=-1, count=0, ip=None):
   self.want_stop = 0
   if ip is None: ip = self.regs.ins_pointer
   glog.info('Starting emulator at pc=%x', ip)
   self.mu.emu_start(ip, end, count=count)
Code example #24
def log_info_step(tag, time, one_step, total_step, num):
    log.info("{} time is {} sec/step ({} / {} steps) in sequence {}".format(tag, time, one_step, total_step, num))
Code example #25
def log_info_total(tag, elapsed_time, total_num):
    log.info("{} time is {} sec (total image count: {})".format(tag, elapsed_time, total_num))
Code example #26
def inform_finish_inference():
    log.info("finished these folders")
Code example #27
    def setup(self, bottom, top):
        """Setup the RoIDataLayer."""

        self._cur_idx = 0
        self.gt_labels = {}
        self.meta = h5py.File('data/sg_vrd_meta.h5', 'r')
        h5_boxes = h5py.File('output/precalc/sg_vrd_objs.hdf5', 'r')
        self.cache_boxes = {}
        if os.path.exists('output/cache/sg_vrd_objs.pkl'):
            self.cache_boxes = zl.load('output/cache/sg_vrd_objs.pkl')
            glog.info('loaded obj data from cache')
        else:
            glog.info('Preloading obj')
            zl.tic()
            for k in h5_boxes['train'].keys():
                boxes = h5_boxes['train/%s/boxes' % k][...]
                self.cache_boxes[k] = boxes
            glog.info('done preloading obj %f' % zl.toc())
            # Save under the same name checked above so the cache is reused.
            zl.save('output/cache/sg_vrd_objs.pkl', self.cache_boxes)
        self.cache = h5py.File('output/sg_vrd_cache.h5', 'r')
        self.cache_mem = {}
        if os.path.exists('output/cache/sg_vrd_gt.pkl'):
            self.gt_labels = zl.load('output/cache/sg_vrd_gt.pkl')
            glog.info('loaded gt data from cache')
        else:
            glog.info('Preloading gt')
            zl.tic()
            for k in self.meta['gt/train'].keys():
                rlp_labels = self.meta['gt/train/%s/rlp_labels' % k][...]
                sub_boxes = self.meta['gt/train/%s/sub_boxes' % k][...].astype(
                    np.float)
                obj_boxes = self.meta['gt/train/%s/obj_boxes' % k][...].astype(
                    np.float)
                #if sub_boxes.shape[0]>0:
                #    zeros = np.zeros((sub_boxes.shape[0],1), dtype=np.float)
                #    # first index is always zero since we do one image by one image
                #    sub_boxes = np.concatenate((zeros, sub_boxes),axis=1)
                #    obj_boxes = np.concatenate((zeros, obj_boxes),axis=1)
                self.gt_labels[k] = {}
                self.gt_labels[k]['rlp_labels'] = rlp_labels
                self.gt_labels[k]['sub_boxes'] = sub_boxes
                self.gt_labels[k]['obj_boxes'] = obj_boxes
            glog.info('done preloading gt %f' % zl.toc())
            zl.save('output/cache/sg_vrd_gt.pkl', self.gt_labels)

        self.imids = []
        for k in self.gt_labels.keys():
            self.imids.append(k)
        self.imidx = 0
        random.shuffle(self.imids)
        # parse the layer parameter string, which must be valid YAML
        layer_params = yaml.load(self.param_str)

        self._num_classes = layer_params['num_classes']
        self._name_to_top_map = {}

        # data blob: holds a batch of N images, each with 3 channels
        idx = 0
        top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 1024, 50, 50)
        self._name_to_top_map['conv_new_1'] = idx
        idx += 1

        top[idx].reshape(1, 5, 1, 1)
        self._name_to_top_map['sub_boxes'] = idx
        idx += 1

        top[idx].reshape(1, 5, 1, 1)
        self._name_to_top_map['obj_boxes'] = idx
        idx += 1
        top[idx].reshape(1, 5, 1, 1)
        self._name_to_top_map['union_boxes'] = idx
        idx += 1
        # labels blob: R categorical labels in [0, ..., K] for K foreground
        # classes plus background
        top[idx].reshape(1, 1, 1, 1)
        self._name_to_top_map['labels'] = idx
Code example #28
    def attack_all_images(self, args, arch_name, target_model, surrogate_model, result_dump_path):
        for batch_idx, data_tuple in enumerate(self.dataset_loader):
            if args.dataset == "ImageNet":
                if target_model.input_size[-1] >= 299:
                    images, true_labels = data_tuple[1], data_tuple[2]
                else:
                    images, true_labels = data_tuple[0], data_tuple[2]
            else:
                images, true_labels = data_tuple[0], data_tuple[1]
            if images.size(-1) != target_model.input_size[-1]:
                images = F.interpolate(images, size=target_model.input_size[-1], mode='bilinear', align_corners=True)
            images = images.cuda()
            true_labels = true_labels.cuda()
            if self.targeted:
                if self.target_type == 'random':
                    target_labels = torch.randint(low=0, high=CLASS_NUM[args.dataset],
                                                  size=true_labels.size()).long().cuda()
                    invalid_target_index = target_labels.eq(true_labels)
                    while invalid_target_index.sum().item() > 0:
                        target_labels[invalid_target_index] = torch.randint(low=0, high=CLASS_NUM[args.dataset],
                                  size=target_labels[invalid_target_index].shape).long().cuda()
                        invalid_target_index = target_labels.eq(true_labels)
                elif args.target_type == 'least_likely':
                    logits = target_model(images)
                    target_labels = logits.argmin(dim=1)
                elif args.target_type == "increment":
                    target_labels = torch.fmod(true_labels + 1, CLASS_NUM[args.dataset])
                else:
                    raise NotImplementedError('Unknown target_type: {}'.format(args.target_type))
            else:
                target_labels = None

            self.attack_images(batch_idx, images, true_labels, target_labels, target_model, surrogate_model, args)
        self.not_done_all[(self.query_all > args.max_queries).bool()] = 1
        self.success_all[(self.query_all > args.max_queries).bool()] = 0
        log.info('{} attack finished ({} images)'.format(arch_name, self.total_images))
        log.info('        avg correct: {:.4f}'.format(self.correct_all.mean().item()))
        log.info('       avg not_done: {:.4f}'.format(self.not_done_all.mean().item()))  # how many images are not done yet
        if self.success_all.sum().item() > 0:
            log.info(
                '     avg mean_query: {:.4f}'.format(self.success_query_all[self.success_all.bool()].mean().item()))
            log.info(
                '   avg median_query: {:.4f}'.format(self.success_query_all[self.success_all.bool()].median().item()))
            log.info('     max query: {}'.format(self.success_query_all[self.success_all.bool()].max().item()))
        if self.not_done_all.sum().item() > 0:
            log.info(
                '  avg not_done_prob: {:.4f}'.format(self.not_done_prob_all[self.not_done_all.bool()].mean().item()))
        log.info('Saving results to {}'.format(result_dump_path))
        meta_info_dict = {"avg_correct": self.correct_all.mean().item(),
                          "avg_not_done": self.not_done_all[self.correct_all.bool()].mean().item(),
                          "mean_query": self.success_query_all[self.success_all.bool()].mean().item() if self.success_all.sum() > 0 else 0,
                          "median_query": self.success_query_all[self.success_all.bool()].median().item() if self.success_all.sum() > 0 else 0,
                          "max_query": self.success_query_all[self.success_all.bool()].max().item() if self.success_all.sum() > 0 else 0,
                          "correct_all": self.correct_all.detach().cpu().numpy().astype(np.int32).tolist(),
                          "not_done_all": self.not_done_all.detach().cpu().numpy().astype(np.int32).tolist(),
                          "query_all": self.query_all.detach().cpu().numpy().astype(np.int32).tolist(),
                          "not_done_prob": self.not_done_prob_all[self.not_done_all.bool()].mean().item(),
                          "args": vars(args)}
        if self.stats_grad_cosine_similarity:
            meta_info_dict['grad_cosine_similarities'] = self.cosine_similarity_all
            N = 0
            sum_cosine_similarity = 0.0
            for image_index, cos_dict in self.cosine_similarity_all.items():
                for q, cosine in cos_dict.items():
                    sum_cosine_similarity += abs(cosine)
                    N += 1
            avg_cosine_similarity = sum_cosine_similarity / N
            meta_info_dict["avg_cosine_similarity"] = avg_cosine_similarity
        with open(result_dump_path, "w") as result_file_obj:
            json.dump(meta_info_dict, result_file_obj, sort_keys=True)
        log.info("done, write stats info to {}".format(result_dump_path))
Code example #29
def print_args(args):
    keys = sorted(vars(args).keys())
    max_len = max([len(key) for key in keys])
    for key in keys:
        prefix = ' ' * (max_len + 1 - len(key)) + key
        log.info('{:s}: {}'.format(prefix, args.__getattribute__(key)))
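
A quick sketch of what this helper prints, using a throwaway argparse.Namespace; the attribute names are made up for illustration:

import argparse

args = argparse.Namespace(dataset='CIFAR-10', norm='l2', max_queries=10000)
print_args(args)
# Expected log lines (keys right-aligned to the longest name):
#      dataset: CIFAR-10
#  max_queries: 10000
#         norm: l2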
Code example #30
        if args.attack_defense:
            log_file_path = osp.join(args.exp_dir, 'run_defense_{}_{}.log'.format(args.arch, args.defense_model))
        else:
            log_file_path = osp.join(args.exp_dir, 'run_{}.log'.format(args.arch))
    set_log_file(log_file_path)

    if args.test_archs:
        archs = []
        if args.dataset == "CIFAR-10" or args.dataset == "CIFAR-100":
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/{}/checkpoint.pth.tar".format(PY_ROOT,
                                                                                        args.dataset,  arch)
                if os.path.exists(test_model_path):
                    archs.append(arch)
                else:
                    log.info(test_model_path + " does not exist!")
        elif args.dataset == "TinyImageNet":
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_list_path = "{root}/train_pytorch_model/real_image_model/{dataset}@{arch}*.pth.tar".format(
                    root=PY_ROOT, dataset=args.dataset, arch=arch)
                test_model_path = list(glob.glob(test_model_list_path))
                if test_model_path and os.path.exists(test_model_path[0]):
                    archs.append(arch)
        else:
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_list_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/checkpoints/{}*.pth".format(
                    PY_ROOT,
                    args.dataset, arch)
                test_model_list_path = list(glob.glob(test_model_list_path))
                if len(test_model_list_path) == 0:  # this arch does not exists in args.dataset
                    continue
Code example #31
    def attack_images(self, batch_index, images, true_labels, target_labels, target_model, surrogate_model, args):
        image_step = self.l2_image_step if args.norm == 'l2' else self.linf_image_step
        img_idx_to_batch_idx = ImageIdxToOrigBatchIdx(args.batch_size)
        proj_step = self.l2_proj_step if args.norm == 'l2' else self.linf_proj_step
        criterion = self.cw_loss if args.loss == "cw" else self.xent_loss
        adv_images = images.clone()
        query = torch.zeros(images.size(0)).cuda()
        # cos_similarity = torch.zeros(images.size(0), args.max_queries)
        with torch.no_grad():
            logit = target_model(images)
            l = criterion(logit, true_labels, target_labels)
        pred = logit.argmax(dim=1)
        correct = pred.eq(true_labels).float()  # shape = (batch_size,)
        not_done = correct.clone()
        query = query + not_done
            selected = torch.arange(batch_index * args.batch_size, min((batch_index + 1) * args.batch_size, self.total_images))  # indices of all images in this batch
        step_index = 0
        while query.min().item() < args.max_queries:
            step_index += 1
            surrogate_gradients = self.get_grad(surrogate_model, criterion, adv_images, true_labels, target_labels)
            if self.stats_grad_cosine_similarity:
                no_grad = target_model.no_grad
                target_model.no_grad = False
                true_grad = self.get_grad(target_model, criterion, adv_images, true_labels, target_labels)
                cosine_similarity = self.get_cos_similarity(surrogate_gradients, true_grad)
                target_model.no_grad = no_grad

            attempt_images = image_step(adv_images, surrogate_gradients, args.image_lr)
            attempt_images = proj_step(images, args.epsilon, attempt_images)
            attempt_images = torch.clamp(attempt_images, 0, 1).detach()
            with torch.no_grad():
                attempt_logits = target_model(attempt_images)
            attempt_positive_loss = criterion(attempt_logits, true_labels, target_labels)

            attempt_images = image_step(adv_images, -surrogate_gradients, args.image_lr)
            attempt_images = proj_step(images, args.epsilon, attempt_images)
            attempt_images = torch.clamp(attempt_images, 0, 1).detach()
            with torch.no_grad():
                attempt_logits = target_model(attempt_images)
            attempt_negative_loss = criterion(attempt_logits, true_labels, target_labels)

            idx_positive_improved = (attempt_positive_loss >= l).float().view(-1,1,1,1)
            idx_negative_improved = (attempt_negative_loss >= l).float().view(-1,1,1,1)

            query = query + not_done
            query = query + (1-idx_positive_improved).view(-1) * not_done

            random_grad = self.get_random_grad(images)

            attempt_images = image_step(adv_images, random_grad, args.image_lr)
            attempt_images = proj_step(images, args.epsilon, attempt_images)
            attempt_images = torch.clamp(attempt_images, 0, 1).detach()
            with torch.no_grad():
                attempt_logits = target_model(attempt_images)
            attempt_positive_loss_random_grad = criterion(attempt_logits, true_labels, target_labels)

            attempt_images = image_step(adv_images, -random_grad, args.image_lr)
            attempt_images = proj_step(images, args.epsilon, attempt_images)
            attempt_images = torch.clamp(attempt_images, 0, 1).detach()
            with torch.no_grad():
                attempt_logits = target_model(attempt_images)
            attempt_negative_loss_random_grad = criterion(attempt_logits, true_labels, target_labels)

            idx_positive_improved_random_grad = (attempt_positive_loss_random_grad >= l).float().view(-1, 1, 1, 1)
            idx_negative_improved_random_grad = (attempt_negative_loss_random_grad >= l).float().view(-1, 1, 1, 1)

            idx_positive_larger_than_negative = (attempt_positive_loss_random_grad >= attempt_negative_loss_random_grad).float().view(-1, 1, 1, 1)
            query = query + (1 - idx_positive_improved).view(-1) * (1 - idx_negative_improved).view(-1) * not_done
            query = query + (1 - idx_positive_improved).view(-1) * (1 - idx_negative_improved).view(-1) * (1-idx_positive_improved_random_grad).view(-1) * not_done

            grad = idx_positive_improved * surrogate_gradients + \
                   (1 - idx_positive_improved) * idx_negative_improved * (-surrogate_gradients) + \
                   (1 - idx_positive_improved) * (1 - idx_negative_improved) * idx_positive_improved_random_grad * random_grad + \
                   (1 - idx_positive_improved) * (1 - idx_negative_improved) * (1 - idx_positive_improved_random_grad) * idx_negative_improved_random_grad * (-random_grad) + \
                   (1 - idx_positive_improved) * (1 - idx_negative_improved) * (1 - idx_positive_improved_random_grad) * (1 - idx_negative_improved_random_grad) * idx_positive_larger_than_negative * random_grad + \
                   (1 - idx_positive_improved) * (1 - idx_negative_improved) * (1 - idx_positive_improved_random_grad) * (1 - idx_negative_improved_random_grad) * (1 - idx_positive_larger_than_negative) * (-random_grad)
            lr = torch.ones(adv_images.size(0)).cuda().float()
            lr[idx_positive_improved.view(-1).bool()] = args.image_lr
            lr[((1 - idx_positive_improved).view(-1) * idx_negative_improved.view(-1)).bool()] = args.image_lr
            lr[((1 - idx_positive_improved).view(-1) * (1 - idx_negative_improved).view(-1)).bool()] = args.random_lr
            lr = lr.view(-1,1,1,1)

            adv_images = image_step(adv_images, grad, lr)
            adv_images = proj_step(images, args.epsilon, adv_images)
            adv_images = torch.clamp(adv_images, 0, 1).detach()

            with torch.no_grad():
                adv_logit = target_model(adv_images)
                adv_pred = adv_logit.argmax(dim=1)
                adv_prob = F.softmax(adv_logit, dim=1)
                l = criterion(adv_logit, true_labels, target_labels)

            if args.targeted:
                not_done = not_done * (1 - adv_pred.eq(target_labels).float()).float()  # not_done starts as correct, shape = (batch_size,)
            else:
                not_done = not_done * adv_pred.eq(true_labels).float()
            success = (1 - not_done) * correct
            success_query = success * query
            not_done_prob = adv_prob[torch.arange(adv_images.size(0)), true_labels] * not_done

            log.info('Attacking image {} - {} / {}, step {}, max query {}'.format(
                batch_index * args.batch_size, (batch_index + 1) * args.batch_size,
                self.total_images, step_index + 1, int(query.max().item())
            ))
            step_index += 1
            log.info('        correct: {:.4f}'.format(correct.mean().item()))
            log.info('       not_done: {:.4f}'.format(float(not_done.detach().cpu().sum().item()) / float(args.batch_size)))
            if success.sum().item() > 0:
                log.info('     mean_query: {:.4f}'.format(success_query[success.bool()].mean().item()))
                log.info('   median_query: {:.4f}'.format(success_query[success.bool()].median().item()))
            if not_done.sum().item() > 0:
                log.info('  not_done_prob: {:.4f}'.format(not_done_prob[not_done.bool()].mean().item()))

            not_done_np = not_done.detach().cpu().numpy().astype(np.int32)
            done_img_idx_list = np.where(not_done_np == 0)[0].tolist()
            delete_all = False
            if done_img_idx_list:
                for skip_index in done_img_idx_list:
                    batch_idx = img_idx_to_batch_idx[skip_index]
                    pos = selected[batch_idx].item()
                    for key in ['query', 'correct', 'not_done', 'success', 'success_query', 'not_done_prob']:
                        value_all = getattr(self, key + "_all")
                        value = eval(key)[skip_index].item()
                        value_all[pos] = value
                    if self.stats_grad_cosine_similarity:
                        self.cosine_similarity_all[pos][int(query[skip_index].item())] = cosine_similarity[skip_index].item()
                images, adv_images, query, true_labels, target_labels, correct, not_done, l = \
                    self.delete_tensor_by_index_list(done_img_idx_list, images, adv_images, query,
                                                     true_labels, target_labels, correct, not_done, l)
                img_idx_to_batch_idx.del_by_index_list(done_img_idx_list)
                delete_all = images is None
            if delete_all:
                break

        for key in ['query', 'correct',  'not_done',
                    'success', 'success_query', 'not_done_prob']:
            for img_idx, batch_idx in img_idx_to_batch_idx.proj_dict.items():
                pos = selected[batch_idx].item()
                value_all = getattr(self, key + "_all")
                value = eval(key)[img_idx].item()
                value_all[pos] = value  # value_all holds results for all images in one array; write back this batch's entries

                if self.stats_grad_cosine_similarity:
                    assert cosine_similarity.size(0) == len(img_idx_to_batch_idx.proj_dict)
                    self.cosine_similarity_all[pos][int(query[img_idx].item())] = cosine_similarity[
                        img_idx].item()
        img_idx_to_batch_idx.proj_dict.clear()
Code example #32
def latexifyConfMatrix(scorePath, familyNames):
    df = pd.read_csv(scorePath, delim_whitespace=True, names=familyNames)
    df.insert(0, 'Family', familyNames)
    log.info('Latex table for confusion matrix:\n' + df.to_latex(index=False))
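
A hypothetical invocation; 'scores.txt' would hold an N x N whitespace-delimited confusion matrix, one row per family (the file name and family names here are made up):

latexifyConfMatrix('scores.txt', ['FamilyA', 'FamilyB', 'FamilyC'])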
Code example #33
def find_delta(inputs_to_change):
    start = time.time()

    f = open("../../HCAS_rect_v6_pra0_tau00_25HU_3000.nnet", "r")

    in_line = f.readline()
    while in_line[0:2] == "//":
        in_line = f.readline()

    numLayers, inputSize, outputSize, _ = [
        int(x) for x in in_line.strip().split(",")[:-1]
    ]

    in_line = f.readline()
    layerSizes = [int(x) for x in in_line.strip().split(",")[:-1]]

    in_line = f.readline()
    symmetric = int(in_line.strip().split(",")[0])

    in_line = f.readline()
    inputMinimums = [float(x) for x in in_line.strip().split(",")[:-1]]

    in_line = f.readline()
    inputMaximums = [float(x) for x in in_line.strip().split(",")[:-1]]

    in_line = f.readline()
    inputMeans = [float(x) for x in in_line.split(",")[:-1]]

    in_line = f.readline()
    inputRanges = [float(x) for x in in_line.strip().split(",")[:-1]]

    weights = []
    biases = []

    for layernum in range(numLayers):
        previousLayerSize = layerSizes[layernum]
        currentLayerSize = layerSizes[layernum + 1]
        weights.append([])
        biases.append([])
        weights[layernum] = np.zeros((currentLayerSize, previousLayerSize))
        for i in range(currentLayerSize):
            in_line = f.readline()
            aux = [float(x) for x in in_line.strip().split(",")[:-1]]
            for j in range(previousLayerSize):
                weights[layernum][i][j] = aux[j]
        biases[layernum] = np.zeros(currentLayerSize)
        for i in range(currentLayerSize):
            in_line = f.readline()
            x = float(in_line.strip().split(",")[0])
            biases[layernum][i] = x

    f.close()
    nn = Model()

    inputs = nn.addVars(inputSize,
                        name="inputs",
                        lb=inputMinimums,
                        ub=inputMaximums)
    deltas = nn.addVars(inputSize, name="deltas")
    absDeltas = nn.addVars(inputSize, name="absDeltas")
    inputsN = nn.addVars(inputSize, name="inputsN")

    input_vals = [3919.704054253667, 0.0, 2.2683915791388873]

    for i in range(inputSize):
        nn.addConstr(inputs[i] == input_vals[i] + deltas[i])
        nn.addConstr(absDeltas[i] == abs_(deltas[i]))
        if i not in inputs_to_change:
            nn.addConstr(deltas[i] == 0)

    for i in inputs_to_change:
        #bound = (inputMaximums[i] - inputMinimums[i]) / 2
        #final_bound = min(bound, max_deltas)
        #deltas[i].lb = -1.0 * final_bound
        #deltas[i].ub = final_bound
        deltas[i].lb = inputMinimums[i] - input_vals[i]
        deltas[i].ub = inputMaximums[i] - input_vals[i]

    for i in range(inputSize):
        nn.addConstr(inputsN[i] == (inputs[i] - inputMeans[i]) /
                     inputRanges[i])

    layerOuts = {}
    layerOuts[1] = nn.addVars(layerSizes[1],
                              name="layerOuts[1]",
                              lb=-GRB.INFINITY,
                              ub=GRB.INFINITY)

    layerReluOuts = {}
    layerReluOuts[1] = nn.addVars(layerSizes[1],
                                  name="layerReluOuts[1]",
                                  lb=0,
                                  ub=GRB.INFINITY)

    nn.update()

    temp = []
    for i in range(layerSizes[1]):
        expr = LinExpr()
        for j in range(layerSizes[0]):
            expr.add(inputsN[j], weights[0][i][j])
        temp.append(expr)

    nn.addConstrs(layerOuts[1][i] == temp[i] + biases[0][i]
                  for i in range(layerSizes[1]))
    nn.addConstrs(layerReluOuts[1][i] == max_(layerOuts[1][i], 0)
                  for i in range(layerSizes[1]))

    for layernum in range(2, numLayers):
        layerOuts[layernum] = nn.addVars(layerSizes[layernum],
                                         name="layerOuts[" + str(layernum) +
                                         "]",
                                         lb=-GRB.INFINITY,
                                         ub=GRB.INFINITY)
        layerReluOuts[layernum] = nn.addVars(layerSizes[layernum],
                                             name="layerReluOuts[" +
                                             str(layernum) + "]",
                                             lb=0,
                                             ub=GRB.INFINITY)
        nn.update()
        temp = []
        for i in range(layerSizes[layernum]):
            expr = LinExpr()
            for j in range(layerSizes[layernum - 1]):
                expr.add(layerReluOuts[layernum - 1][j],
                         weights[layernum - 1][i][j])
            temp.append(expr)
        nn.addConstrs(layerOuts[layernum][i] == temp[i] +
                      biases[layernum - 1][i]
                      for i in range(layerSizes[layernum]))
        nn.addConstrs(
            layerReluOuts[layernum][i] == max_(layerOuts[layernum][i], 0)
            for i in range(layerSizes[layernum]))

    outputs = nn.addVars(layerSizes[-1],
                         name="outputs",
                         lb=-GRB.INFINITY,
                         ub=GRB.INFINITY)
    nn.update()

    temp = []
    for i in range(layerSizes[-1]):
        expr = LinExpr()
        for j in range(layerSizes[-2]):
            expr.add(layerReluOuts[numLayers - 1][j], weights[-1][i][j])
        temp.append(expr)

    nn.addConstrs(outputs[i] == temp[i] + biases[-1][i]
                  for i in range(layerSizes[-1]))

    denormalizedOuts = nn.addVars(outputSize,
                                  lb=-GRB.INFINITY,
                                  ub=GRB.INFINITY)
    for i in range(outputSize):
        nn.addConstr(denormalizedOuts[i] == outputs[i] * inputRanges[-1] +
                     inputMeans[-1])

    nn.addConstr(denormalizedOuts[2] <= denormalizedOuts[1])
    nn.addConstr(denormalizedOuts[1] <= denormalizedOuts[0])
    nn.addConstr(denormalizedOuts[0] <= denormalizedOuts[3])
    nn.addConstr(denormalizedOuts[3] <= denormalizedOuts[4])

    #nn.setObjective(quicksum(absDeltas))

    nn.Params.OutputFlag = False

    middle = time.time()
    nn.optimize()
    end = time.time()

    if nn.status == GRB.Status.OPTIMAL:
        glog.info([inputs[i].X for i in range(inputSize)])
        glog.info([denormalizedOuts[i].X for i in range(outputSize)])
        output = []
        output.append([denormalizedOuts[i].X for i in range(outputSize)])
        for i in inputs_to_change:
            output.append([i, deltas[i].X, 'Y', end - middle])
        return output
    else:
        output = []
        for i in inputs_to_change:
            output.append([i, 0, 0, 'N', end - middle])
        return output
Code example #34
    def testBuildControlFlowGraph(self):
        pathPrefix = '../DataSamples'
        bId = 'test'
        log.info('[Test] Build CFG from ' + pathPrefix + '/' + bId + '.asm')
        cfgBuilder = ControlFlowGraphBuilder(bId, pathPrefix)
        cfgBuilder.buildControlFlowGraph()
        cfgBuilder.exportToNxGraph()
        expectedBlocks = [
            '-2',
            '-1',
            'ff',
            '401048',
            '401050',
            '401052',
            '401054',
            '401064',
            '40106d',
            '401076',
            '401079',
            '401080',
            '401084',
            '401090',
            '4010a3',
            '4010a9',
            '4010ac',
            '4010ae',
            '4010b3',
            '4010b5',
            '4010b7',
        ]
        expectedBlocks = [int(x, 16) for x in expectedBlocks]
        edgeDict = {
            '-2': ['401079', '401090'],
            'ff': ['4010b7'],
            '401048': ['401050', '401048'],
            '401050': ['401054'],
            '401054': ['401064'],
            '401064': ['40106d', '4010ae', '401084', '401064'],
            '40106d': ['401076', '401054'],
            '401076': ['401079'],
            '401079': ['401080', '401079', '-2'],
            '401084': ['401090', '4010a3', '401064'],
            '401090': ['4010a9', '-2'],
            '4010a3': ['4010a9'],
            '4010ac': ['4010ae'],
            '4010ae': ['401064', '-2'],
            '4010b3': ['4010b5', '401084'],
            '4010b5': ['4010b7', '-1'],
            '4010b7': ['ff'],
        }
        expectedEdges = []
        for (src, destinations) in edgeDict.items():
            for dst in destinations:
                expectedEdges.append((int(src, 16), int(dst, 16)))

        for block in expectedBlocks:
            self.assertTrue(block in cfgBuilder.cfg.nodes(),
                            '%s not in CFG' % block)
        for edge in expectedEdges:
            self.assertTrue(edge in cfgBuilder.cfg.edges(),
                            '(%s, %s) not in CFG' % (edge[0], edge[1]))

        self.assertEqual(cfgBuilder.cfg.number_of_nodes(), len(expectedBlocks),
                         '#nodes in CFG != expected #nodes')
        self.assertEqual(cfgBuilder.cfg.number_of_edges(), len(expectedEdges),
                         '#edges in CFG != expected #edges')
Code example #35
def main():
    parser = argparse.ArgumentParser(
        description='Square Attack Hyperparameters.')
    parser.add_argument('--norm',
                        type=str,
                        required=True,
                        choices=['l2', 'linf'])
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--exp-dir',
                        default='logs',
                        type=str,
                        help='directory to save results and logs')
    parser.add_argument(
        '--gpu',
        type=str,
        required=True,
        help='GPU number. Multiple GPUs are possible for PT models.')
    parser.add_argument(
        '--p',
        type=float,
        default=0.05,
        help=
        'Probability of changing a coordinate. Note: check the paper for the best values. '
        'Linf standard: 0.05, L2 standard: 0.1. But robust models require higher p.'
    )
    parser.add_argument('--epsilon', type=float, help='Radius of the Lp ball.')
    parser.add_argument('--max_queries', type=int, default=10000)
    parser.add_argument(
        '--json-config',
        type=str,
        default=
        '/home1/machen/meta_perturbations_black_box_attack/configures/square_attack_conf.json',
        help='a configures file to be passed in instead of arguments')
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--targeted', action="store_true")
    parser.add_argument('--target_type',
                        type=str,
                        default='increment',
                        choices=['random', 'least_likely', "increment"])
    parser.add_argument('--attack_defense', action="store_true")
    parser.add_argument('--defense_model', type=str, default=None)
    parser.add_argument('--arch',
                        default=None,
                        type=str,
                        help='network architecture')
    parser.add_argument('--test_archs', action="store_true")
    args = parser.parse_args()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.json_config:
        # If a json file is given, use the JSON file as the base, and then update it with args
        defaults = json.load(open(args.json_config))[args.dataset][args.norm]
        arg_vars = vars(args)
        arg_vars = {
            k: arg_vars[k]
            for k in arg_vars if arg_vars[k] is not None
        }
        defaults.update(arg_vars)
        args = SimpleNamespace(**defaults)
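        # Merge semantics: the JSON config supplies the base values; any CLI
        # flag actually passed (i.e. non-None after the filter above) overrides
        # its JSON counterpart, and unspecified flags fall back to the JSON.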

    if args.targeted and args.dataset == "ImageNet":
        args.max_queries = 50000
    args.exp_dir = os.path.join(
        args.exp_dir,
        get_exp_dir_name(args.dataset, args.norm, args.targeted,
                         args.target_type, args))
    os.makedirs(args.exp_dir, exist_ok=True)
    if args.test_archs:
        if args.attack_defense:
            log_file_path = osp.join(
                args.exp_dir, 'run_defense_{}.log'.format(args.defense_model))
        else:
            log_file_path = osp.join(args.exp_dir, 'run.log')
    elif args.arch is not None:
        if args.attack_defense:
            log_file_path = osp.join(
                args.exp_dir,
                'run_defense_{}_{}.log'.format(args.arch, args.defense_model))
        else:
            log_file_path = osp.join(args.exp_dir,
                                     'run_{}.log'.format(args.arch))
    set_log_file(log_file_path)
    if args.test_archs:
        archs = []
        if args.dataset == "CIFAR-10" or args.dataset == "CIFAR-100":
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/{}/checkpoint.pth.tar".format(
                    PY_ROOT, args.dataset, arch)
                if os.path.exists(test_model_path):
                    archs.append(arch)
                else:
                    log.info(test_model_path + " does not exist!")
        elif args.dataset == "TinyImageNet":
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_list_path = "{root}/train_pytorch_model/real_image_model/{dataset}@{arch}*.pth.tar".format(
                    root=PY_ROOT, dataset=args.dataset, arch=arch)
                test_model_path = list(glob.glob(test_model_list_path))
                if test_model_path and os.path.exists(test_model_path[0]):
                    archs.append(arch)
        else:
            for arch in MODELS_TEST_STANDARD[args.dataset]:
                test_model_list_path = "{}/train_pytorch_model/real_image_model/{}-pretrained/checkpoints/{}*.pth".format(
                    PY_ROOT, args.dataset, arch)
                test_model_list_path = list(glob.glob(test_model_list_path))
                if len(test_model_list_path) == 0:  # this arch does not exist in args.dataset
                    continue
                archs.append(arch)
    else:
        assert args.arch is not None
        archs = [args.arch]
    args.arch = ", ".join(archs)
    log.info('Command line is: {}'.format(' '.join(sys.argv)))
    log.info("Log file is written in {}".format(log_file_path))
    log.info('Called with args:')
    print_args(args)
    attacker = SquareAttack(args.dataset,
                            args.batch_size,
                            args.targeted,
                            args.target_type,
                            args.epsilon,
                            args.norm,
                            max_queries=args.max_queries)
    for arch in archs:
        if args.attack_defense:
            save_result_path = args.exp_dir + "/{}_{}_result.json".format(
                arch, args.defense_model)
        else:
            save_result_path = args.exp_dir + "/{}_result.json".format(arch)
        if os.path.exists(save_result_path):
            continue
        log.info("Begin attack {} on {}, result will be saved to {}".format(
            arch, args.dataset, save_result_path))
        if args.attack_defense:
            model = DefensiveModel(args.dataset,
                                   arch,
                                   no_grad=True,
                                   defense_model=args.defense_model)
        else:
            model = StandardModel(args.dataset, arch, no_grad=True)
        model.cuda()
        model.eval()
        attacker.attack_all_images(args, arch, model, save_result_path)
Code example #36
0
File: propagate_test.py Project: rajesnal/dplearn
    def test_activate(self):
        log.info('running test_activate...')
        Z = np.array([-1, 2, 3])
        log.info('Z: {0}'.format(Z))

        A1, cache1 = activate(Z, "sigmoid")
        log.info('sigmoid: {0}'.format(A1))

        A2, cache2 = activate(Z, "relu")
        log.info('relu: {0}'.format(A2))

        Z_sig = np.array([0.26894142, 0.88079708, 0.95257413])
        self.assertTrue(np.allclose(A1, Z_sig))
        Z_rel = np.array([0, 2, 3])
        self.assertTrue(np.allclose(A2, Z_rel))

        self.assertTrue(np.array_equal(cache1, Z))
        self.assertTrue(np.array_equal(cache2, Z))
        self.assertTrue(np.array_equal(cache1, cache2))

        dZ1 = backward_single(A1, cache1, "sigmoid")
        dZ1_expected = np.array([0.05287709, 0.09247804, 0.04303412])
        log.info('sigmoid dZ: {0}'.format(dZ1))
        self.assertTrue(np.allclose(dZ1, dZ1_expected))

        dZ2 = backward_single(A2, cache2, "relu")
        dZ2_expected = np.array([0, 2, 3])
        log.info('relu dZ: {0}'.format(dZ2))
        self.assertTrue(np.allclose(dZ2, dZ2_expected))
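The activate and backward_single under test are not shown in this excerpt; below is a minimal sketch consistent with the expected values above (the project's actual implementations may differ):

import numpy as np

def activate(Z, kind):
    """Forward pass; returns the activation and caches the pre-activation Z."""
    if kind == "sigmoid":
        A = 1.0 / (1.0 + np.exp(-Z))
    elif kind == "relu":
        A = np.maximum(0, Z)
    else:
        raise ValueError(kind)
    return A, Z

def backward_single(dA, cache, kind):
    """Backward pass: dZ = dA * g'(Z), with Z recovered from the cache."""
    Z = cache
    if kind == "sigmoid":
        s = 1.0 / (1.0 + np.exp(-Z))
        return dA * s * (1.0 - s)
    if kind == "relu":
        dZ = np.array(dA, copy=True)
        dZ[Z <= 0] = 0
        return dZ
    raise ValueError(kind)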
Code example #37
0
                    'Valid ------  epoch: {:d}    total_loss= {:6f} '.format(
                        epoch + 1, val_loss_value))

            if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
                saver.save(sess=sess,
                           save_path=model_save_path,
                           global_step=epoch)
    sess.close()

    return


if __name__ == '__main__':
    # init args
    args = init_args()
    logger.info('start')
    if args.multi_gpus:
        logger.info('**************** Use multi gpus to train the model')
        train_shadownet_multi_gpu(dataset_dir_train=args.train_dataset_dir,
                                  dataset_dir_val=args.val_dataset_dir,
                                  weights_path=args.weights_path,
                                  char_dict_path=args.char_dict_path,
                                  ord_map_dict_path=args.ord_map_dict_path,
                                  model_save_dir=args.save_path)
    else:
        logger.info('***************** Use single gpu to train the model')
        train_shadownet(dataset_dir_train=args.train_dataset_dir,
                        dataset_dir_val=args.val_dataset_dir,
                        weights_path=args.weights_path,
                        char_dict_path=args.char_dict_path,
                        model_save_dir=args.save_path)
Code example #38
0
def test_lanenet(image_path, weights_path):
    """

    :param image_path:
    :param weights_path:
    :return:
    """
    assert ops.exists(image_path), '{:s} not exist'.format(image_path)

    log.info('Start reading and preprocessing the image data')
    t_start = time.time()
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    image_vis = image
    image = cv2.resize(image, (512, 256), interpolation=cv2.INTER_LINEAR)
    image = image - VGG_MEAN
    log.info('Image loading done, cost time: {:.5f}s'.format(time.time() - t_start))

    input_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[1, 256, 512, 3],
                                  name='input_tensor')
    phase_tensor = tf.constant('train', tf.string)

    net = lanenet_merge_model.LaneNet(phase=phase_tensor, net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor,
                                                     name='lanenet_loss')

    cluster = lanenet_cluster.LaneNetCluster()

    saver = tf.train.Saver()

    # Set sess configuration
    sess_config = tf.ConfigProto(device_count={'GPU': 0})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    with sess.as_default():

        saver.restore(sess=sess, save_path=weights_path)

        t_start = time.time()
        binary_seg_image, instance_seg_image = sess.run(
            [binary_seg_ret, instance_seg_ret],
            feed_dict={input_tensor: [image]})
        t_cost = time.time() - t_start
        log.info('Single-image lane prediction cost: {:.5f}s'.format(t_cost))

        mask_image = cluster.get_lane_mask(
            binary_seg_ret=binary_seg_image[0],
            instance_seg_ret=instance_seg_image[0])
        # mask_image = cluster.get_lane_mask_v2(instance_seg_ret=instance_seg_image[0])
        # mask_image = cv2.resize(mask_image, (image_vis.shape[1], image_vis.shape[0]),
        #                         interpolation=cv2.INTER_LINEAR)

        ele_mex = np.max(instance_seg_image[0], axis=(0, 1))
        for i in range(3):
            if ele_mex[i] == 0:
                scale = 1
            else:
                scale = 255 / ele_mex[i]
            instance_seg_image[0][:, :, i] *= int(scale)
        embedding_image = np.array(instance_seg_image[0], np.uint8)
        cv2.imwrite('embedding_mask.png', embedding_image)

        # mask_image = cluster.get_lane_mask_v2(instance_seg_ret=embedding_image)
        # mask_image = cv2.resize(mask_image, (image_vis.shape[1], image_vis.shape[0]),
        #                         interpolation=cv2.INTER_LINEAR)

        # plt.ion()
        plt.figure('mask_image')
        plt.imshow(mask_image[:, :, (2, 1, 0)])
        plt.figure('src_image')
        plt.imshow(image_vis[:, :, (2, 1, 0)])
        plt.figure('instance_image')
        plt.imshow(embedding_image[:, :, (2, 1, 0)])
        plt.figure('binary_image')
        plt.imshow(binary_seg_image[0] * 255, cmap='gray')
        plt.show()
        # plt.pause(3.0)

    sess.close()

    return
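VGG_MEAN is defined elsewhere in the project; since cv2.imread returns BGR, it is presumably the standard VGG per-channel means. An assumed definition, worth verifying against the project's actual constant:

import numpy as np

# Assumption: canonical VGG channel means in BGR order, matching cv2.imread's layout.
VGG_MEAN = np.array([103.939, 116.779, 123.68], dtype=np.float32)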
Code example #39
0
File: tutorial.py Project: ZiangYan/learn-new-tools
#!/usr/bin/env python
import gym
import glog as log


env = gym.make('CartPole-v0')
env.reset()
for iter_idx in range(100000):
    log.info('Iteration %d' % iter_idx)
    observation = env.reset()
    for t in xrange(100):
        env.render()
        print observation
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        if done:
            print "Episode finished after {} timesteps".format(t+1)
            break
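The snippet above is Python 2 (xrange, print statements). A Python 3 port, assuming the classic gym API in which step() returns a 4-tuple, would be:

#!/usr/bin/env python3
import gym
import glog as log

env = gym.make('CartPole-v0')
env.reset()
for iter_idx in range(100000):
    log.info('Iteration %d' % iter_idx)
    observation = env.reset()
    for t in range(100):
        env.render()
        print(observation)
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        if done:
            print("Episode finished after {} timesteps".format(t + 1))
            break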
Code example #40
0
    def _grid_search(self):
        """Perform a grid search of training hyper-parameters.

        The model that does the best on the dev set will be stored.
        """
        save_path = tempfile.mkdtemp(prefix="VectorMappingMethod")

        def _compute_accuracy():
            self._session.run(self._local_init_op)
            self._session.run(self._dev_iterator.initializer)
            while True:
                try:
                    accuracy, _ = self._session.run(self._accuracy)
                except tf.errors.OutOfRangeError:
                    return accuracy

        best_accuracy, best_learning_rate, best_regularizer = None, None, None

        for learning_rate, regularizer in itertools.product(
                self._learning_rates, self._regularizers):
            # Train using this learning rate and regularizer.
            self._session.run(self._reset_op)
            best_accuracy_for_run = None
            epochs_since_improvement = 0
            epoch = 0
            step = 0
            glog.info(
                "\n\nTraining with learning_rate = %.5f, "
                "and regularizer = %.5f", learning_rate, regularizer)
            self._session.run(self._train_iterator.initializer)

            while epoch < self._MAX_EPOCHS:
                try:
                    loss = self._session.run(
                        self._train_op, {
                            self._learning_rate: learning_rate,
                            self._regularizer: regularizer
                        })
                    step += 1

                except tf.errors.OutOfRangeError:
                    epoch += 1
                    accuracy = _compute_accuracy()
                    log_suffix = ""
                    self._session.run(self._train_iterator.initializer)

                    if best_accuracy is None or accuracy > best_accuracy:
                        best_accuracy = accuracy
                        best_learning_rate = learning_rate
                        best_regularizer = regularizer
                        self._saver.save(self._session, save_path)
                        log_suffix += "*"

                    if (best_accuracy_for_run is None
                            or accuracy > best_accuracy_for_run):
                        epochs_since_improvement = 0
                        best_accuracy_for_run = accuracy
                        log_suffix += "*"

                    glog.info(
                        "epoch %i: step: %i, loss: %.3f, "
                        "dev accuracy: %.2f%% %s", epoch, step, loss,
                        accuracy * 100, log_suffix)

                    epochs_since_improvement += 1
                    if epochs_since_improvement >= 10:
                        glog.info(
                            "No improvement for %i epochs, terminating run.",
                            epochs_since_improvement)
                        break

        glog.info(
            "Best accuracy found was %.2f%%, with learning_rate = %.5f and "
            "regularizer = %.5f.", best_accuracy * 100, best_learning_rate,
            best_regularizer)
        self._saver.restore(self._session, save_path)
        shutil.rmtree(save_path)
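Stripped of the TensorFlow session plumbing, the loop above is a plain grid search with per-run early stopping. A sketch of the bare pattern, with hypothetical train_one_epoch/evaluate callables standing in for the session calls:

import itertools

def grid_search(train_one_epoch, evaluate, learning_rates, regularizers,
                max_epochs=100, patience=10):
    # Try every (lr, reg) pair; early-stop a run after `patience` epochs
    # without improvement; remember the globally best setting.
    best = (None, None, None)  # (accuracy, lr, reg)
    for lr, reg in itertools.product(learning_rates, regularizers):
        best_for_run, stale = None, 0
        for _ in range(max_epochs):
            train_one_epoch(lr, reg)
            acc = evaluate()
            if best[0] is None or acc > best[0]:
                best = (acc, lr, reg)
            if best_for_run is None or acc > best_for_run:
                best_for_run, stale = acc, 0
            else:
                stale += 1
            if stale >= patience:
                break
    return best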
Code example #41
0
def main(argv):
    db_file = None
    list_file = None
    img_path = None
    ext = '.jpg'
    if_check = False
    help_msg = 'dataset_create_imagelist.py -i <lmdb> -p <image path> -o <list>\
--check\n\
-i <lmdb>           The input lmdb database file\n\
-o <list>           The output image list file\n\
-p <image path>     The path which store the downloaded images\n\
--check [optional]  Force to check if the jpg image can be loaded.\n\
                    Which will slow down the process. Default False'

    try:
        opts, args = getopt.getopt(argv, 'hi:p:o:', ['check'])
    except getopt.GetoptError:
        print help_msg
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print help_msg
            sys.exit()
        elif opt == '-i':
            db_file = arg
        elif opt == '-o':
            list_file = arg
        elif opt == '-p':
            img_path = arg
        elif opt == '--check':
            if_check = True
        else:
            print help_msg
            sys.exit(2)
    # Check arguments
    if db_file is None or list_file is None or img_path is None:
        print help_msg
        sys.exit(2)

    # Check if the image path exists
    log.info('Check image path %s' % img_path)
    if os.path.exists(img_path) is False:
        log.fatal('Can not locate the image path %s' % img_path)
        sys.exit(2)
    # Create the text list file
    log.info('Open the image list file %s' % list_file)
    try:
        fp = open(list_file, 'w')
    except IOError:
        log.fatal('Can not open %s for writing' % list_file)
        sys.exit(2)
    # open the lmdb file
    log.info('Open db file %s' % db_file)
    db = lt.open_db_ro(db_file)
    db_stat = db.stat()
    log.info('Total Entries: %d' % db_stat['entries'])
    bar = eb.EasyProgressBar()
    bar.set_end(db_stat['entries'])
    bar.start()
    counter = 0
    err_counter = 0
    # Iter the whole database
    with db.begin(write=False) as txn:
        with txn.cursor() as cur:
            for key, val in cur:
                counter += 1
                # Get an available URL to download the photo
                try:
                    val_dic = yaml.load(val)
                    photo = myxml.parse_xml_to_etree(val_dic['photo'])
                    photo_id = photo.get('id')
                    focal_in35 = int(val_dic['focal_in35'])
                except:
                    err_counter += 1
                    continue
                # Filter out erroneous focal values
                if focal_in35 < 5 or focal_in35 > 200:
                    continue
                # Get the image full name
                if img_path[-1] == r'/':
                    img_name = img_path + photo_id + ext
                else:
                    img_name = img_path + r'/' + photo_id + ext
                img_name = os.path.abspath(img_name)
                # Check if the image exists
                if if_check:
                    # Load the image
                    try:
                        Image.open(img_name)
                    except:
                        err_counter += 1
                        continue
                else:
                    if os.path.exists(img_name) is False:
                        err_counter += 1
                        continue

                # Write the info to list file
                fp.write(img_name + ' %d\n' % focal_in35)
                bar.update(counter)
    # Finish the loop
    db.close()
    fp.close()
    bar.finish()
    log.info('Finished. errors: %d' % err_counter)
Code example #42
0
  def __init__(self, name="Elevator", mass=None):
    super(Elevator, self).__init__(name)
    # Stall Torque in N m
    self.stall_torque = 2.402
    # Stall Current in Amps
    self.stall_current = 126.145
    # Free Speed in RPM
    self.free_speed = 5015.562
    # Free Current in Amps
    self.free_current = 1.170
    # Mass of the Elevator
    if mass is None:
      self.mass = 5.0
    else:
      self.mass = mass

    # Number of motors
    self.num_motors = 2.0
    # Resistance of the motor
    self.resistance = 12.0 / self.stall_current
    # Motor velocity constant
    self.Kv = ((self.free_speed / 60.0 * 2.0 * numpy.pi) /
               (12.0 - self.resistance * self.free_current))
    # Torque constant
    self.Kt = (self.num_motors * self.stall_torque) / self.stall_current
    # Gear ratio
    self.G = 8
    # Radius of pulley
    self.r = 0.0254

    # Control loop time step
    self.dt = 0.005

    # State is [position, velocity]
    # Input is [Voltage]

    C1 = self.Kt * self.G * self.G / (self.Kv * self.resistance * self.r * self.r * self.mass)
    C2 = self.G * self.Kt / (self.resistance * self.r * self.mass)

    self.A_continuous = numpy.matrix(
        [[0, 1],
         [0, -C1]])

    # Start with the unmodified input
    self.B_continuous = numpy.matrix(
        [[0],
         [C2]])

    self.C = numpy.matrix([[1, 0]])
    self.D = numpy.matrix([[0]])
    
    self.A, self.B = self.ContinuousToDiscrete(
        self.A_continuous, self.B_continuous, self.dt)

    controllability = controls.ctrb(self.A, self.B)

    q_pos = 0.015
    q_vel = 0.5
    self.Q = numpy.matrix([[(1.0 / (q_pos ** 2.0)), 0.0],
                           [0.0, (1.0 / (q_vel ** 2.0))]])

    self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0))]])
    self.K = controls.dlqr(self.A, self.B, self.Q, self.R)

    glog.info('K %s', str(self.K))
    glog.info('Poles are %s', str(numpy.linalg.eig(self.A - self.B * self.K)[0]))

    self.rpl = 0.30
    self.ipl = 0.10
    self.PlaceObserverPoles([self.rpl + 1j * self.ipl,
                             self.rpl - 1j * self.ipl])

    q_pos = 0.05
    q_vel = 2.65
    self.Q = numpy.matrix([[(q_pos ** 2.0), 0.0],
                           [0.0, (q_vel ** 2.0)]])

    r_volts = 0.025
    self.R = numpy.matrix([[(r_volts ** 2.0)]])

    self.KalmanGain, self.Q_steady = controls.kalman(
        A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)

    self.L = self.A * self.KalmanGain
    glog.info('KalL is %s', str(self.L))

    # The box formed by U_min and U_max must encompass all possible values,
    # or else Austin's code gets angry.
    self.U_max = numpy.matrix([[12.0]])
    self.U_min = numpy.matrix([[-12.0]])

    self.InitializeState()
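As a quick numeric sanity check of the motor constants computed above (rounded; a sketch, not part of the original source):

import numpy

stall_torque, stall_current = 2.402, 126.145
free_speed, free_current, num_motors = 5015.562, 1.170, 2.0
R = 12.0 / stall_current                                               # ~0.0951 ohm
Kv = (free_speed / 60.0 * 2.0 * numpy.pi) / (12.0 - R * free_current)  # ~44.2 (rad/s)/V
Kt = num_motors * stall_torque / stall_current                         # ~0.0381 N*m/A
print(R, Kv, Kt)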
Code example #43
0
def train_shadownet_multi_gpu(dataset_dir_train, dataset_dir_val, weights_path,
                              char_dict_path, ord_map_dict_path,
                              model_save_dir):
    """

    :param dataset_dir:
    :param weights_path:
    :param char_dict_path:
    :param ord_map_dict_path:
    :return:
    """
    # prepare dataset information
    NUM_CLASSES = get_num_class(char_dict_path)

    train_dataset = shadownet_data_feed_pipline.CrnnDataFeeder(
        dataset_dir=dataset_dir_train,
        char_dict_path=char_dict_path,
        ord_map_dict_path=ord_map_dict_path,
        flags='train')
    val_dataset = shadownet_data_feed_pipline.CrnnDataFeeder(
        dataset_dir=dataset_dir_train,
        char_dict_path=char_dict_path,
        ord_map_dict_path=ord_map_dict_path,
        flags='valid')
    """
    
    train_dataset = read_tfrecord.CrnnDataFeeder(
        dataset_dir=dataset_dir_train,
        char_dict_path=char_dict_path,
        flags='train')

    val_dataset = read_tfrecord.CrnnDataFeeder(
        dataset_dir=dataset_dir_val,
        char_dict_path=char_dict_path,
        flags='valid')
        
    """

    train_images, train_labels, train_images_paths = train_dataset.inputs(
        batch_size=CFG.TRAIN.BATCH_SIZE)
    val_images, val_labels, val_images_paths = val_dataset.inputs(
        batch_size=CFG.TRAIN.BATCH_SIZE)

    # set crnn net
    shadownet = crnn_model.ShadowNet(phase='train',
                                     hidden_nums=CFG.ARCH.HIDDEN_UNITS,
                                     layers_nums=CFG.ARCH.HIDDEN_LAYERS,
                                     num_classes=NUM_CLASSES)
    shadownet_val = crnn_model.ShadowNet(phase='test',
                                         hidden_nums=CFG.ARCH.HIDDEN_UNITS,
                                         layers_nums=CFG.ARCH.HIDDEN_LAYERS,
                                         num_classes=NUM_CLASSES)

    # set average container
    tower_grads = []
    train_tower_loss = []
    val_tower_loss = []
    batchnorm_updates = None
    train_summary_op_updates = None

    # set lr
    global_step = tf.Variable(0, name='global_step', trainable=False)
    learning_rate = tf.train.exponential_decay(
        learning_rate=CFG.TRAIN.LEARNING_RATE,
        global_step=global_step,
        decay_steps=CFG.TRAIN.LR_DECAY_STEPS,
        decay_rate=CFG.TRAIN.LR_DECAY_RATE,
        staircase=CFG.TRAIN.LR_STAIRCASE)

    # set up optimizer
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=0.9)

    # set distributed train op
    with tf.variable_scope(tf.get_variable_scope()):
        is_network_initialized = False
        for i in range(CFG.TRAIN.GPU_NUM):
            with tf.device('/gpu:{:d}'.format(i)):
                with tf.name_scope('tower_{:d}'.format(i)) as _:
                    train_loss, grads = compute_net_gradients(
                        train_images,
                        train_labels,
                        shadownet,
                        optimizer,
                        is_net_first_initialized=is_network_initialized)

                    is_network_initialized = True

                    # Only use the mean and var in the first gpu tower to update the parameter
                    if i == 0:
                        batchnorm_updates = tf.get_collection(
                            tf.GraphKeys.UPDATE_OPS)
                        train_summary_op_updates = tf.get_collection(
                            tf.GraphKeys.SUMMARIES)

                    tower_grads.append(grads)
                    train_tower_loss.append(train_loss)
                with tf.name_scope('validation_{:d}'.format(i)) as _:
                    val_loss, _ = compute_net_gradients(
                        val_images,
                        val_labels,
                        shadownet_val,
                        optimizer,
                        is_net_first_initialized=is_network_initialized)
                    val_tower_loss.append(val_loss)

    grads = average_gradients(tower_grads)
    avg_train_loss = tf.reduce_mean(train_tower_loss)
    avg_val_loss = tf.reduce_mean(val_tower_loss)

    # Track the moving averages of all trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(
        CFG.TRAIN.MOVING_AVERAGE_DECAY, num_updates=global_step)
    variables_to_average = tf.trainable_variables(
    ) + tf.moving_average_variables()
    variables_averages_op = variable_averages.apply(variables_to_average)

    # Group all the op needed for training
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    apply_gradient_op = optimizer.apply_gradients(grads,
                                                  global_step=global_step)
    train_op = tf.group(apply_gradient_op, variables_averages_op,
                        batchnorm_updates_op)

    # set tensorflow summary
    tboard_save_path = model_save_dir
    os.makedirs(tboard_save_path, exist_ok=True)

    summary_writer = tf.summary.FileWriter(tboard_save_path)

    avg_train_loss_scalar = tf.summary.scalar(name='average_train_loss',
                                              tensor=avg_train_loss)
    avg_val_loss_scalar = tf.summary.scalar(name='average_val_loss',
                                            tensor=avg_val_loss)
    learning_rate_scalar = tf.summary.scalar(name='learning_rate_scalar',
                                             tensor=learning_rate)
    train_merge_summary_op = tf.summary.merge(
        [avg_train_loss_scalar, learning_rate_scalar] +
        train_summary_op_updates)
    val_merge_summary_op = tf.summary.merge([avg_val_loss_scalar])

    # set tensorflow saver
    saver = tf.train.Saver()
    os.makedirs(model_save_dir, exist_ok=True)
    train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S',
                                     time.localtime(time.time()))
    model_name = 'shadownet_{:s}.ckpt'.format(str(train_start_time))
    model_save_path = ops.join(model_save_dir, model_name)

    # set sess config
    sess_config = tf.ConfigProto(device_count={'GPU': CFG.TRAIN.GPU_NUM},
                                 allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    # Set the training parameters
    train_epochs = CFG.TRAIN.EPOCHS

    logger.info('Global configuration is as follows:')
    logger.info(CFG)

    sess = tf.Session(config=sess_config)

    summary_writer.add_graph(sess.graph)

    with sess.as_default():
        epoch = 0
        tf.train.write_graph(
            graph_or_graph_def=sess.graph,
            logdir='',
            name='{:s}/shadownet_model.pb'.format(model_save_dir))

        if weights_path is None or not os.path.exists(weights_path) or len(
                os.listdir(weights_path)) < 5:
            logger.info('Training from scratch')
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            weights_path = tf.train.latest_checkpoint(weights_path)
            logger.info('Restore model from last model checkpoint {:s}'.format(
                weights_path))
            saver.restore(sess=sess, save_path=weights_path)
            epoch = sess.run(tf.train.get_global_step())

        train_cost_time_mean = []
        val_cost_time_mean = []

        while epoch < train_epochs:
            epoch += 1
            # training part
            t_start = time.time()

            _, train_loss_value, train_summary, lr = \
                sess.run(fetches=[train_op,
                                  avg_train_loss,
                                  train_merge_summary_op,
                                  learning_rate])

            if math.isnan(train_loss_value):
                raise ValueError('Train loss is nan')

            summary_writer.add_summary(summary=train_summary,
                                       global_step=epoch)

            if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
                logger.info(
                    'lr= {:.5f}   epoch:{:6d}   total_loss= {:.5f} '.format(
                        lr,
                        epoch + 1,
                        train_loss_value,
                    ))

            if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
                # validation part

                val_loss_value, val_summary = \
                    sess.run(fetches=[avg_val_loss,
                                      val_merge_summary_op])

                summary_writer.add_summary(val_summary, global_step=epoch)

                logger.info(
                    'Valid ------  epoch: {:d}    total_loss= {:6f} '.format(
                        epoch + 1, val_loss_value))

            if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
                saver.save(sess=sess,
                           save_path=model_save_path,
                           global_step=epoch)
    sess.close()

    return
Code example #44
0
File: download_image.py Project: Akrit2013/PROJ1505
def main(argv):
    db_file = None
    skip_num = None
    data_path = '../data'
    overwrite = False
    help_msg = 'download_image.py -i <lmdbfile> -o[optional] <datapath>\
--overwrite[optional] --skip <num>\n\
-i <lmdbfile>       The input lmdb file contains the exif of photos\n\
-o <datapath>       The path where to store the downloaded photos\n\
--overwrite         If set, overwrite the exists photos, default not\n\
--skip <num>        Skip the first XX photos'

    try:
        opts, args = getopt.getopt(argv, 'hi:o:', ['overwrite', 'skip='])
    except getopt.GetoptError:
        print help_msg
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print help_msg
            sys.exit()
        elif opt == '-i':
            db_file = arg
        elif opt == '-o':
            data_path = arg
        elif opt == '--overwrite':
            overwrite = True
        elif opt == '--skip':
            skip_num = int(arg)
        else:
            print help_msg
            sys.exit(2)

    if db_file is None:
        print help_msg
        sys.exit(2)

    # Try to open the database file
    db = lt.open_db_ro(db_file)
    if db is None:
        log.fatal('\033[0;31mCan not open %s\033[0m' % db_file)
        sys.exit(2)

    # Get the entries from the database
    entries = db.stat()['entries']
    # Entries counter
    counter = 0
    # Check the data path
    if not tb.check_path(data_path):
        log.info('Create new dir %s' % data_path)
    # Iter the data base
    if skip_num is not None:
        log.info('Skipping the first %d entries...' % skip_num)
    with db.begin(write=False) as txn:
        with txn.cursor() as cur:
            for key, val in cur:
                counter += 1
                if skip_num is not None and counter < skip_num:
                    continue
                # Parse the val into dict
                val_dic = yaml.load(val)
                # Get an available URL to download the photo
                photo = myxml.parse_xml_to_etree(val_dic['photo'])
                url = tb.get_url(photo, val_dic['urls'], True)
                # Download the url and save image
                log.info('Download %s from %s [%d/%d]' %
                         (key, url, counter, entries))
                try:
                    tb.download_url_and_save(url, key, overwrite, data_path)
                except:
                    log.error('\033[0;31mFailed to download %s from %s\033[0m'
                              % (key, url))
                    continue

    db.close()
Code example #45
0
File: glog_test.py Project: benley/python-glog
    def test_info(self):
        log.info('test')
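For context, python-glog wraps the standard logging module in a glog-style format and adds CHECK helpers. A small usage sketch based on the library's documented surface (verify the check_* helpers against your installed version):

import glog as log

log.setLevel("INFO")          # threshold: DEBUG, INFO, WARNING, ERROR, FATAL
log.info("it works")
log.error("something went wrong: %s", "details")
log.check_eq(1 + 1, 2)        # glog-style CHECK; raises FailedCheckException on failure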
Code example #46
0
def log_info_time(tag, time):
    log.info("{} time is {}sec".format(tag, time))
Code example #47
0
image_paths.sort(key=lambda k: int(os.path.basename(k).split(".")[0]))
print("# Frames = {}".format(len(image_paths)))

nf = len(image_paths)
if not os.path.exists("1407_annotated"):
    os.makedirs("1407_annotated")
a = read_vatic("1407.txt")
cs = random_colors(N=len(a), bright=True)
for i in range(nf):
    img = imread(image_paths[i])
    attrs = []
    boxes = []
    labels = []
    colors = []
    tidx = []
    j = 0
    for t in a:
        for f in a[t]["frames"]:
            if f == i and a[t]["frames"][f]["visible"]:
                boxes.append(a[t]["frames"][f]["box"])
                attrs.append(a[t]["frames"][f]["attribute"])
                labels.append(a[t]["label"])
                colors.append(cs[t])
                tidx.append(t)
                j += 1
                break
        j += 1
    log.info(len(boxes))
    fig = draw_images(img, boxes, labels, attrs, colors, tidx)
    fig.savefig("1407_annotated/{}.jpg".format(i), dpi=300)
Code example #48
0
File: kernel.py Project: unjambonakap/chdrft
def load_elf(kern, fil):
  assert fil
  if not isinstance(fil, ElfUtils):
    elf = ElfUtils(fil)
  else:
    elf = fil
  need_load = []
  for seg in elf.elf.iter_segments():
    s = Attributize(seg.header)
    print('GOT SEG ', hex(s.p_vaddr), hex(s.p_offset), s)
    if s.p_type == 'PT_LOAD' and s.p_memsz > 0:
      need_load.append(s)
  flag_mp = [
      (MEM_FLAGS.PF_X, uc.UC_PROT_EXEC),
      (MEM_FLAGS.PF_R, uc.UC_PROT_READ),
      (MEM_FLAGS.PF_W, uc.UC_PROT_WRITE),
  ]

  for s in need_load:
    flag_mem = 0
    for seg_flag, uc_flag in flag_mp:
      if seg_flag & s.p_flags:
        flag_mem = flag_mem | uc_flag

    flag_mem |= uc.UC_PROT_EXEC
    addr = s.p_vaddr
    sz = s.p_memsz
    align = s.p_align
    align = max(align, 4096)

    if s.p_paddr != 0:
      #base_addr = addr - s.p_offset
      base_addr = addr
      base_addr = base_addr - base_addr % align
    else:
      addr = base_addr = s.p_vaddr
    #base_addr = base_addr - addr % align
    seg_sz = sz + addr % align
    seg_sz = (sz + align - 1) & ~(align - 1)
    seg_sz = (seg_sz + 4095) & ~4095

    print('LOADING ', flag_mem, hex(base_addr), hex(addr), seg_sz, hex(s.p_offset))
    kern.mem_map(base_addr, seg_sz, flag_mem)

    content = elf.get_seg_content(s)
    print('WRITING ', hex(addr), len(content))
    kern.mu.mem_write(addr, content)

  kern.post_load()

  regs = kern.regs
  for note in elf.notes:
    if note.n_type != 'NT_PRSTATUS':
      continue
    print(len(note.status.raw))
    print(len(note.data))
    if kern.arch.typ == Arch.arm64:
      assert len(note.data) == 392
      buf = BufferParser(note.data[76+32+4:], arch=kern.arch)
      for i in range(31):
        regs[f'x{i}'] = buf.read_u64()
      regs.sp = buf.read_u64()
      regs.pc = buf.read_u64()
      regs.cpacr_el1 = buf.read_u64()
      glog.info(f'Loaded sp={regs.sp:x}, pc={regs.pc:x} cpacr={regs.cpacr_el1:x}')

      continue

    if 'status' in note:
      for r in note.status.pr_reg._fields:
        if r not in regs:
          glog.info('could not load reg %s', r)
          continue
        v = note.status.pr_reg[r].get()
        regs[r] = v
      if kern.arch.typ == Arch.x86_64:
        fsbase = note.status.pr_reg.fs_base.get()
        gsbase = note.status.pr_reg.gs_base.get()
        kern.set_fs_base(fsbase)
        kern.set_gs_base(gsbase)
        assert kern.get_gs_base() == gsbase
        assert kern.get_fs_base() == fsbase

    print(note.status.pr_reg)

  return elf
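The segment-size arithmetic above is the usual power-of-two align-up trick; a quick self-contained check (align_up is a name introduced here for illustration):

def align_up(n, align=4096):
    # Round n up to the next multiple of align (align must be a power of two).
    return (n + align - 1) & ~(align - 1)

assert align_up(1) == 4096
assert align_up(4096) == 4096
assert align_up(4097) == 8192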
Code example #49
0
def log_info_elapsed_time(tag, elapsed_time):
    log.info("{} time is {}sec".format(tag, elapsed_time))
Code example #50
0
def test_lanenet_batch(image_dir, weights_path, batch_size, save_dir=None):
    """

    :param image_dir:
    :param weights_path:
    :param batch_size:
    :param save_dir:
    :return:
    """
    assert ops.exists(image_dir), '{:s} not exist'.format(image_dir)

    log.info('Start collecting image file paths...')
    image_path_list = glob.glob('{:s}/**/*.jpg'.format(image_dir), recursive=True) + \
                      glob.glob('{:s}/**/*.png'.format(image_dir), recursive=True) + \
                      glob.glob('{:s}/**/*.jpeg'.format(image_dir), recursive=True)

    input_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[None, 256, 512, 3],
                                  name='input_tensor')
    phase_tensor = tf.constant('train', tf.string)

    net = lanenet_merge_model.LaneNet(phase=phase_tensor, net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor,
                                                     name='lanenet_loss')

    cluster = lanenet_cluster.LaneNetCluster()

    saver = tf.train.Saver()

    # Set sess configuration
    sess_config = tf.ConfigProto(device_count={'GPU': 1})
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    with sess.as_default():

        saver.restore(sess=sess, save_path=weights_path)

        epoch_nums = int(math.ceil(len(image_path_list) / batch_size))

        for epoch in range(epoch_nums):
            log.info('[Epoch:{:d}] Start reading and preprocessing images...'.format(epoch))
            t_start = time.time()
            image_path_epoch = image_path_list[epoch * batch_size:(epoch + 1) *
                                               batch_size]
            image_list_epoch = [
                cv2.imread(tmp, cv2.IMREAD_COLOR) for tmp in image_path_epoch
            ]
            image_vis_list = image_list_epoch
            image_list_epoch = [
                cv2.resize(tmp, (512, 256), interpolation=cv2.INTER_LINEAR)
                for tmp in image_list_epoch
            ]
            image_list_epoch = [tmp - VGG_MEAN for tmp in image_list_epoch]
            t_cost = time.time() - t_start
            log.info('[Epoch:{:d}] Image preprocessing cost: {:.5f}s'.format(epoch, t_cost))

            t_start = time.time()
            binary_seg_images, instance_seg_images = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: image_list_epoch})
            t_cost = time.time() - t_start
            log.info(
                '[Epoch:{:d}] Predicted lanes for {:d} images, total: {:.5f}s, '
                'average per image: {:.5f}s'.format(
                    epoch, len(image_path_epoch), t_cost,
                    t_cost / len(image_path_epoch)))

            for index, binary_seg_image in enumerate(binary_seg_images):
                mask_image = cluster.get_lane_mask(
                    binary_seg_ret=binary_seg_image,
                    instance_seg_ret=instance_seg_images[index])
                mask_image = cv2.resize(mask_image,
                                        (image_vis_list[index].shape[1],
                                         image_vis_list[index].shape[0]),
                                        interpolation=cv2.INTER_LINEAR)
                # plt.ion()
                # plt.figure('mask_image')
                # plt.imshow(mask_image[:, :, (2, 1, 0)])
                # plt.figure('src_image')
                # plt.imshow(image_vis_list[index][:, :, (2, 1, 0)])
                # plt.pause(3.0)
                # plt.show()
                # plt.ioff()

                mask_image = cv2.addWeighted(image_vis_list[index], 1.0,
                                             mask_image, 1.0, 0)

                if save_dir is not None:
                    image_name = ops.split(image_path_epoch[index])[1]
                    image_save_path = ops.join(save_dir, image_name)
                    cv2.imwrite(image_save_path, mask_image)

    sess.close()

    return
Code example #51
0
def main(argv):
    config_file = 'flickr.xml'
    db_file = 'flickr_info_lmdb'
    db_file_trash = None
    helpmsg = """fetch_image_info.py -c <configfile> -o <lmdbfile>
 -t[optional] <trashfile>"""

    try:
        opts, args = getopt.getopt(argv, "hc:o:t:")
    except getopt.GetoptError:
        print helpmsg
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print helpmsg
            sys.exit()
        elif opt == '-c':
            config_file = arg
        elif opt == '-o':
            db_file = arg
        elif opt == '-t':
            db_file_trash = arg
        else:
            print helpmsg
            sys.exit(2)

    if db_file_trash is None:
        db_file_trash = db_file + '_trash'
    # Parse the xml config file
    config = myxml.xmlconfig(config_file)
    g_time_show_marker = time.time()
    g_time_show = int(config.time_show)

    # Create the lmdb database
    # Check if the lmdb file is already exist
    db = lt.open_db(db_file)
    db_trash = lt.open_db(db_file_trash)
    # Start to use flickrapi walk through the flickr server
    flickr = flickrapi.FlickrAPI(config.key, config.secret)

    time_start_num = float(config.time_start)
    time_end_num = float(config.time_end)
    time_current_num = time_start_num

    log.info('The start time is %s' % tb.unixtime_to_datearr(time_start_num))
    log.info('The end time is %s' % tb.unixtime_to_datearr(time_end_num))
    if config.time_dynamic:
        log.info('The timing mode is dynamic')
        time_interval_num = float(config.time_interval_init)
    else:
        log.info('The timing mode is fixed')
        time_interval_num = float(config.time_interval)

    log.info('Time interval is %d days %d hours %d secs' %
             tuple(tb.seconds_to_days(time_interval_num)))

    scenes_list = config.get_scenes_labels()
    lens_list = config.get_lens_labels()
    log.info('The scenes list is %s' % str(scenes_list))
    log.info('The lens list is %s' % str(lens_list))
    # Start to loop the data
    log.info('Start to fetch the photo info')

    # Globel counter
    # Count the total photos fetched
    g_photo_counter = 0
    # Count the photos which pass the screen procedure
    g_qualified_counter = 0

    db_size = 0
    db_trash_size = 0
    while time_current_num < time_end_num:
        start_time = str(time_current_num)
        end_time = str(time_current_num + time_interval_num)
        time_current_num += time_interval_num
        text_str = None
        extra_str = config.urls + ', ' + 'tags'
        # Counter in this time slice
        qualified_counter = 0
        photo_counter_max = 0
        # Loop the labels
        for lens_label in lens_list:
            for scenes_label in scenes_list:
                photo_counter = 0
                text_str = lens_label + ', ' + scenes_label
                log.info('\033[1;33mFetch date %s-%s, label: %s\033[0m' %
                         (tb.unixtime_to_datearr(start_time),
                          tb.unixtime_to_datearr(end_time),
                          text_str))
                log.info('\033[1;33mTime interval: %d days %d hours \
%d secs\033[0m' %
                         tuple(tb.seconds_to_days(time_interval_num)))
                log.info('\033[1;33mFetch Photos: %d, Qualified Photos: %d\
, Db Size: %d, Trash Size: %d\033[0m'
                         % (g_photo_counter, g_qualified_counter,
                            db_size, db_trash_size))
                # Search the photos according to the label
                # A list to store all the fetched photos
                for photo in flickr.walk(tag_mode='all',
                                         text=text_str,
                                         min_upload_date=start_time,
                                         max_upload_date=end_time,
                                         privacy_filter=1,
                                         content_type=1,
                                         extras=extra_str,
                                         per_page=int(config.page_size)):
                    # Show the overall info if X sec passed
                    if time.time() - g_time_show_marker > g_time_show:
                        g_time_show_marker = time.time()
                        log.info('\033[1;33mFetch date %s-%s, label: %s\033[0m'
                                 % (tb.unixtime_to_datearr(start_time),
                                    tb.unixtime_to_datearr(end_time),
                                    text_str))
                        log.info('\033[1;33mFetch Photos: %d, Qualified Photos:\
%d, Db Size: %d, Trash Size: %d\033[0m' % (g_photo_counter,
                                 g_qualified_counter, db_size, db_trash_size))
                    photo_counter += 1
                    g_photo_counter += 1
                    # Check the database if the photo is already been recorded
                    if lt.check_photo_id(db, photo.get('id')):
                        # Update the label if needed
                        lt.update_label(db, photo.get('id'),
                                        [scenes_label, lens_label])
                        continue
                    if lt.check_photo_id(db_trash, photo.get('id')):
                        continue
                    # Check the photo, and fetch the exif if needed
                    try:
                        rst = tb.get_exif(flickr, photo, config)
                    except ConnectionError as e:
                        log.error('\033[0;31mFetch Exif Error:\033[0m %s' % e)
                        continue

                    # If photo info and exif is invalid, return None
                    if rst is None:
                        continue
                    else:
                        exif = rst[0]
                        stat = rst[1]
                        if stat:
                            qualified_counter += 1
                            g_qualified_counter += 1
                            db_size = lt.write_db(db, exif, photo,
                                                  [scenes_label, lens_label],
                                                  config)
                        else:
                            db_trash_size = lt.write_db(db_trash, exif, photo,
                                                        text_str, config)
                # Record the max batch size
                if photo_counter_max < photo_counter:
                    photo_counter_max = photo_counter

        # Finish the data slice loop, re-adjust the time_interval_num
        if config.time_dynamic:
            # Dynamically adjust the time interval according to
            # the max size of the fetch batch
            if photo_counter_max > 1.2 * int(config.batch_size):
                time_interval_num = int(time_interval_num / 1.2)
            elif photo_counter_max < 0.8 * int(config.batch_size):
                time_interval_num = int(time_interval_num / 0.8)

            if time_interval_num > float(config.time_interval_max):
                time_interval_num = float(config.time_interval_max)
            if time_interval_num < float(config.time_interval_min):
                time_interval_num = float(config.time_interval_min)

        # If the photo collected more than enough, make it stop
        if db_size > config.max_size:
            log.info('Total collected photos: %d, stop at %s' %
                     (db_size, tb.unixtime_to_datearr(end_time)))
            break

    # Finish the rest of the things.
    db.close()
    db_trash.close()
Code example #52
0
def get_image(cfg, json_file):

    log.info("json_file : {}".format(json_file))
    to_path = "/aimldl-dat/data-gaze/AIML_Database"
    log.info("to_path : {}".format(to_path))

    IMAGE_API = cfg['IMAGE_API']
    USE_IMAGE_API = IMAGE_API['ENABLE']
    SAVE_LOCAL_COPY = True

    images_annotated = {'files': [], 'unique': set(), 'not_found': set()}
    res_lanes = {
        '0_lanes': 0,
        '1_lanes': 0,
        '2_lanes': 0,
        '3_lanes': 0,
        '4_lanes': 0,
        '5_lanes': 0,
        '6_lanes': 0
    }

    new_json = []

    tic = time.time()
    # log.info("\nrelease_anndb:-----------------------------")
    timestamp = ("{:%d%m%y_%H%M%S}").format(datetime.datetime.now())

    # log.info("json_file: {}".format(json_file))
    # log.info("to_path: {}".format(to_path))

    save_path = os.path.join(to_path, 'lnd-' + timestamp)
    # log.info("save_path: {}".format(save_path))
    common.mkdir_p(save_path)

    images_save_path = os.path.join(save_path, 'images')
    log.info("images_save_path: {}".format(images_save_path))
    common.mkdir_p(images_save_path)

    with open(json_file, 'r') as file:
        json_lines = file.readlines()

        # Iterate over each image
        # for line_index,val in enumerate(json_lines):
        no_of_ann = 0
        for line_index, val in tqdm.tqdm(enumerate(json_lines),
                                         total=len(json_lines)):
            with_abs_path = {'lanes': [], 'h_samples': [], 'raw_file': None}

            json_line = json_lines[line_index]
            sample = json.loads(json_line)
            image_name = sample['raw_file']
            lanes = sample['lanes']
            h_samples = sample['h_samples']
            res_lane = []

            # Download image
            if USE_IMAGE_API:
                get_img_from_url_success = annonutils.get_image_from_url(
                    IMAGE_API,
                    image_name,
                    images_save_path,
                    save_local_copy=SAVE_LOCAL_COPY,
                    resize_image=True)
                if get_img_from_url_success:
                    images_annotated['files'].append(image_name)
                    images_annotated['unique'].add(image_name)
                else:
                    images_annotated['not_found'].add(image_name)

            # Number of lanes
            for lane in lanes:
                lane_id_found = False
                for lane_id in lane:
                    if lane_id == -2:
                        continue
                    else:
                        lane_id_found = True
                        break
                if lane_id_found:
                    no_of_ann += 1
                    res_lane.append(lane)
            if len(res_lane) == 0:
                res_lanes['0_lanes'] = res_lanes['0_lanes'] + 1
            elif len(res_lane) == 1:
                res_lanes['1_lanes'] = res_lanes['1_lanes'] + 1
            elif len(res_lane) == 2:
                res_lanes['2_lanes'] = res_lanes['2_lanes'] + 1
            elif len(res_lane) == 3:
                res_lanes['3_lanes'] = res_lanes['3_lanes'] + 1
            elif len(res_lane) == 4:
                res_lanes['4_lanes'] = res_lanes['4_lanes'] + 1
            elif len(res_lane) == 5:
                res_lanes['5_lanes'] = res_lanes['5_lanes'] + 1
            elif len(res_lane) == 6:
                res_lanes['6_lanes'] = res_lanes['6_lanes'] + 1

            with_abs_path['lanes'] = lanes
            with_abs_path['h_samples'] = h_samples
            with_abs_path['raw_file'] = os.path.join(images_save_path,
                                                     image_name)

            new_json.append(with_abs_path)

    json_name = json_file.split('/')[-1]
    new_json_name = json_name.split('.')[0]
    with open(save_path + '/' + new_json_name + '-' + timestamp + '.json',
              'w') as outfile:
        for items in new_json:
            # log.info("items : {}".format(items))
            json.dump(items, outfile)
            outfile.write('\n')

    stats = {
        'files': len(images_annotated['files']),
        'unique': len(images_annotated['unique']),
        'not_found': len(images_annotated['not_found']),
        'no_of_ann': no_of_ann,
        'number_of_images_per_lane': res_lanes
    }

    log.info("\nstats: {}".format(stats))
    log.info('\nDone (t={:0.2f}s)\n'.format(time.time() - tic))

    with open(save_path + '/' + 'stats' + '-' + timestamp + '.json', 'w') as f:
        json.dump(stats, f)
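As an aside, the elif ladder that fills res_lanes above could be collapsed into a keyed update; a behavior-preserving sketch (counts above 6 lanes stay silently ignored, as in the original):

n = len(res_lane)
if n <= 6:
    res_lanes['{}_lanes'.format(n)] += 1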
Code example #53
0
    def dump(self, *args):
        glog.info('Dump: %s', args)
        for cb in self.push_cbs:
            cb(*args)
Code example #54
0
def start_evaluation(args):
    """Launches the evaluation process"""

    if args.dataset == 'vgg':
        dataset = VGGFace2(args.val,
                           args.v_list,
                           args.v_land,
                           landmarks_training=True)
    elif args.dataset == 'celeb':
        dataset = CelebA(args.val, args.v_land, test=True)
    else:
        dataset = NDG(args.val, args.v_land)

    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = t.Compose(
            [Rescale((48, 48)), ToTensor(switch_rb=True)])
    else:
        exit()

    val_loader = DataLoader(dataset,
                            batch_size=args.val_batch_size,
                            num_workers=4,
                            shuffle=False,
                            pin_memory=True)

    model = models_landmarks['landnet']
    assert args.snapshot is not None
    log.info('Testing snapshot ' + args.snapshot + ' ...')
    model = load_model_state(model,
                             args.snapshot,
                             args.device,
                             eval_state=True)
    model.eval()
    cudnn.benchmark = True
    model = torch.nn.DataParallel(
        model,
        device_ids=[args.device],
    )

    log.info('Face landmarks model:')
    log.info(model)

    avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)

    log.info('Avg RMSE error: {}'.format(avg_err))
    log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
    log.info('Failure rate: {}'.format(failures_rate))
Code example #55
0
	def forward(self, bottom, top):
		wtErr = np.abs(bottom[0].data[...] - bottom[1].data[...]) * bottom[2].data[...]
		top[0].data[...] = self.param_.loss_weight * np.sum(wtErr)/float(self.batchSz_)	
		glog.info('Loss is %f' % top[0].data[0])
Code example #56
0
	def forward(self, bottom, top):
		top[0].data[...] = self.param_.loss_weight * np.sum(np.abs(bottom[0].data[...].squeeze()\
													 - bottom[1].data[...].squeeze()))/float(self.batchSz_)	
		glog.info('Loss is %f' % top[0].data[0])
Code example #57
0
def test_lanenet_batch(src_dir, weights_path, save_dir, save_json=True):
    """

    :param src_dir:
    :param weights_path:
    :param save_dir:
    :return:
    """
    assert ops.exists(src_dir), '{:s} not exist'.format(src_dir)

    os.makedirs(save_dir, exist_ok=True)

    input_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[1, 256, 512, 3],
                                  name='input_tensor')

    net = lanenet.LaneNet(phase='test', net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor,
                                                     name='lanenet_model')

    postprocessor = lanenet_postprocess.LaneNetPostProcessor()

    saver = tf.train.Saver()

    # Set sess configuration
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    with sess.as_default():
        saver.restore(sess=sess, save_path=weights_path)

        image_list = glob.glob('{:s}/**/*.png'.format(src_dir), recursive=True)
        avg_time_cost = []
        #json_gt = [json.loads(line) for line in open('/Users/mylesfoley/git/lanenet-lane-detection/ROOT_DIR/TUSIMPLE_DATASET/test_set/label_data.json')]
        lane_list = []
        for index, image_path in tqdm.tqdm(enumerate(sorted(image_list)),
                                           total=len(image_list)):

            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            image_vis = image
            image = cv2.resize(image, (512, 256),
                               interpolation=cv2.INTER_LINEAR)
            image = image / 127.5 - 1.0

            t_start = time.time()
            binary_seg_image, instance_seg_image = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: [image]})
            avg_time_cost.append(time.time() - t_start)
            image_name = image_path.split('/')[-1]
            postprocess_result = postprocessor.postprocess(
                binary_seg_result=binary_seg_image[0],
                instance_seg_result=instance_seg_image[0],
                source_image=image_vis,
                raw_file=image_name)
            lane_list.append(postprocess_result['lane_data'])

            if save_json:
                # Append one JSON record per image ('w' truncates the file on
                # the first image, 'a' appends subsequent records).
                os.makedirs('Images/JSON', exist_ok=True)
                mode = 'w' if index == 0 else 'a'
                with open('Images/JSON/inf_data.json', mode) as json_file:
                    json.dump(postprocess_result['lane_data'], json_file)
                    json_file.write('\n')
            if index % 10 == 0:
                log.info(
                    'Mean inference time every single image: {:.5f}s'.format(
                        np.mean(avg_time_cost)))
                avg_time_cost.clear()

            input_image_name = image_path.split('/')[-1]
            output_image_path = ops.join(save_dir, input_image_name)
            cv2.imwrite(output_image_path,
                        postprocess_result['source_image'])
            #cv2.imwrite(output_image_path, postprocess_result['mask_image'])
            #cv2.imwrite(output_image_path, binary_seg_image[0] * 255)

    return lane_list
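A minimal driver for the batch test above might look like this; the paths are hypothetical placeholders:

if __name__ == '__main__':
    # Hypothetical paths -- substitute your own dataset and checkpoint.
    lanes = test_lanenet_batch(src_dir='./data/test_images',
                               weights_path='./model/tusimple_lanenet.ckpt',
                               save_dir='./output',
                               save_json=True)
    print('Processed {:d} images'.format(len(lanes)))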
コード例 #58
0
def test_lanenet_batch(src_dir, weights_path, save_dir):
    """

    :param src_dir:
    :param weights_path:
    :param save_dir:
    :return:
    """
    assert ops.exists(src_dir), '{:s} does not exist'.format(src_dir)

    os.makedirs(save_dir, exist_ok=True)

    input_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[1, 256, 512, 3],
                                  name='input_tensor')

    net = lanenet.LaneNet(phase='test', net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor,
                                                     name='lanenet_model')

    postprocessor = lanenet_postprocess.LaneNetPostProcessor()

    saver = tf.train.Saver()

    # Set sess configuration
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    with sess.as_default():

        saver.restore(sess=sess, save_path=weights_path)

        image_list = glob.glob('{:s}/**/*.jpg'.format(src_dir), recursive=True)
        avg_time_cost = []
        for index, image_path in tqdm.tqdm(enumerate(image_list),
                                           total=len(image_list)):

            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            image_vis = image
            image = cv2.resize(image, (512, 256),
                               interpolation=cv2.INTER_LINEAR)
            image = image / 127.5 - 1.0

            t_start = time.time()
            binary_seg_image, instance_seg_image = sess.run(
                [binary_seg_ret, instance_seg_ret],
                feed_dict={input_tensor: [image]})
            avg_time_cost.append(time.time() - t_start)

            postprocess_result = postprocessor.postprocess(
                binary_seg_result=binary_seg_image[0],
                instance_seg_result=instance_seg_image[0],
                source_image=image_vis)

            if index % 100 == 0:
                log.info(
                    'Mean inference time every single image: {:.5f}s'.format(
                        np.mean(avg_time_cost)))
                avg_time_cost.clear()

            # TuSimple layout: reuse the sub-path after 'clips' as the output sub-dir
            input_image_dir = ops.split(image_path.split('clips')[1])[0][1:]
            input_image_name = ops.split(image_path)[1]
            output_image_dir = ops.join(save_dir, input_image_dir)
            os.makedirs(output_image_dir, exist_ok=True)
            output_image_path = ops.join(output_image_dir, input_image_name)
            if ops.exists(output_image_path):
                continue

            cv2.imwrite(output_image_path, postprocess_result['source_image'])

    return
コード例 #59
0
def main(argv):
    src_file = None
    dst_file = None
    config_file = None
    result_dict = {}
    help_msg = 'dataset_split_class.py -i <indexfile> -o <output> -c <config>\n\
-i <file>           The input index text file\n\
-o <file>           The output index text file\n\
-c <file>           The configure xml file'

    try:
        opts, args = getopt.getopt(argv, 'hi:c:o:')
    except getopt.GetoptError:
        print(help_msg)
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print(help_msg)
            sys.exit()
        elif opt == '-i':
            src_file = arg
        elif opt == '-o':
            dst_file = arg
        elif opt == '-c':
            config_file = arg
        else:
            print(help_msg)
            sys.exit(2)

    if src_file is None or dst_file is None or config_file is None:
        print(help_msg)
        sys.exit(2)

    # Check the config file
    log.info('Parsing configure file: %s' % config_file)
    config = myxml.parse_classifier_xml(config_file)
    if config is None:
        log.fatal('Parse configure file %s error' % config_file)
        sys.exit(2)
    result_dict = dict.fromkeys(config)
    # Check the src_file
    log.info('Opening %s' % src_file)
    try:
        src_fp = open(src_file, 'r')
    except IOError:
        log.fatal('Can not open %s' % src_file)
        sys.exit(2)
    # Open the dst file
    log.info('Opening %s' % dst_file)
    try:
        dst_fp = open(dst_file, 'w')
    except IOError:
        log.fatal('Can not open %s' % dst_file)
        sys.exit(2)

    # loop the src_file
    for line in src_fp.readlines():
        element = line.split(' ')
        if len(element) != 2:
            log.warn('\033[31mWARNING:\033[0m Malformed line (expected "image_path focal_length"): %s' % line)
            continue
        focal_length = int(element[-1])
        image_path = element[0]
        # Get the label
        label = get_class(config, focal_length)
        if label is None:
            log.warn('\033[32mSKIP:\033[0m %s' % line)
            continue
        if result_dict[label] is None:
            result_dict[label] = 1
        else:
            result_dict[label] += 1
        # Write the new file
        dst_fp.write(image_path + ' %d\n' % label)

    src_fp.close()
    dst_fp.close()
    log.info('Final result: %s' % str(result_dict))
    log.info('Finished')
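The script calls get_class(config, focal_length) without defining it; a plausible sketch, assuming parse_classifier_xml returns a mapping from integer label to an inclusive (min, max) focal-length range (purely an assumption about the XML schema):

def get_class(config, focal_length):
    # Hypothetical helper: return the label whose focal-length range
    # contains the value, or None so the caller skips the line.
    for label, (lo, hi) in config.items():
        if lo <= focal_length <= hi:
            return label
    return None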
コード例 #60
0
def test_lanenet(image_path, weights_path):
    """

    :param image_path:
    :param weights_path:
    :return:
    """
    assert ops.exists(image_path), '{:s} does not exist'.format(image_path)

    log.info('Start reading image and preprocessing')
    t_start = time.time()
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    image_vis = image
    image = cv2.resize(image, (512, 256), interpolation=cv2.INTER_LINEAR)
    image = image / 127.5 - 1.0
    log.info('Image load complete, cost time: {:.5f}s'.format(time.time() -
                                                              t_start))

    input_tensor = tf.placeholder(dtype=tf.float32,
                                  shape=[1, 256, 512, 3],
                                  name='input_tensor')

    net = lanenet.LaneNet(phase='test', net_flag='vgg')
    binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor,
                                                     name='lanenet_model')

    postprocessor = lanenet_postprocess.LaneNetPostProcessor()

    saver = tf.train.Saver()

    # Set sess configuration
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TEST.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    with sess.as_default():

        saver.restore(sess=sess, save_path=weights_path)

        t_start = time.time()
        binary_seg_image, instance_seg_image = sess.run(
            [binary_seg_ret, instance_seg_ret],
            feed_dict={input_tensor: [image]})
        t_cost = time.time() - t_start
        log.info('Single image inference cost time: {:.5f}s'.format(t_cost))

        postprocess_result = postprocessor.postprocess(
            binary_seg_result=binary_seg_image[0],
            instance_seg_result=instance_seg_image[0],
            source_image=image_vis)
        mask_image = postprocess_result['mask_image']

        # Rescale each embedding channel independently for visualization
        for i in range(CFG.TRAIN.EMBEDDING_FEATS_DIMS):
            instance_seg_image[0][:, :, i] = minmax_scale(instance_seg_image[0][:, :, i])
        embedding_image = np.array(instance_seg_image[0], np.uint8)

        plt.figure('mask_image')
        plt.imshow(mask_image[:, :, (2, 1, 0)])
        plt.figure('src_image')
        plt.imshow(image_vis[:, :, (2, 1, 0)])
        plt.figure('instance_image')
        plt.imshow(embedding_image[:, :, (2, 1, 0)])
        plt.figure('binary_image')
        plt.imshow(binary_seg_image[0] * 255, cmap='gray')
        plt.show()

        cv2.imwrite('instance_mask_image.png', mask_image)
        cv2.imwrite('source_image.png', postprocess_result['source_image'])
        cv2.imwrite('binary_mask_image.png', binary_seg_image[0] * 255)

    sess.close()

    return
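A minimal driver for the single-image test, again with hypothetical paths:

if __name__ == '__main__':
    # Hypothetical paths -- substitute your own image and checkpoint.
    test_lanenet('./data/test.jpg', './model/tusimple_lanenet.ckpt')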