Example #1
    def execute_cmd(cmd_name):
        """Run ros command by sending GRPC requests and return HTTP response."""
        channel = grpc.insecure_channel(gflags.FLAGS.hmi_ros_node_service)
        stub = ros_node_pb2.HMIRosNodeStub(channel)

        response = None
        status = runtime_status.RuntimeStatus
        if cmd_name == 'reset':
            request = ros_node_pb2.ChangeDrivingModeRequest(
                action=ros_node_pb2.ChangeDrivingModeRequest.RESET_TO_MANUAL)
            response = stub.ChangeDrivingMode(request)

            # Update runtime status.
            tool_status = status.get_tools()
            if tool_status.playing_status != runtime_status_pb2.ToolStatus.PLAYING_NOT_READY:
                tool_status.playing_status = runtime_status_pb2.ToolStatus.PLAYING_READY_TO_CHECK

        elif cmd_name == 'start_auto_driving':
            request = ros_node_pb2.ChangeDrivingModeRequest(
                action=ros_node_pb2.ChangeDrivingModeRequest.START_TO_AUTO)
            response = stub.ChangeDrivingMode(request)

            # Update runtime status.
            status.get_tools().playing_status = (
                runtime_status_pb2.ToolStatus.PLAYING)
        else:
            error_msg = 'RosServiceApi: Unknown command "{}"'.format(cmd_name)
            glog.error(error_msg)
            return error_msg, httplib.BAD_REQUEST

        status.broadcast_status_if_changed()
        glog.info('Processed command "{}", and get response:{}'.format(
            cmd_name, response))
        return 'OK', httplib.OK
Example #2
    def _init_example_info_queue(self):
        """
        Read index file and put example info into SAMPLE_INFO_QUEUE
        :return:
        """
        log.info('Start filling {:s} dataset sample information queue...'.format(self._dataset_flag))

        t_start = time.time()
        for annotation_info in tqdm.tqdm(self._annotation_infos):
            image_path = annotation_info[0]
            lexicon_index = annotation_info[1]

            try:
                lexicon_label = [self._lexicon_infos[lexicon_index]]
                encoded_label, _ = self.encode_labels(lexicon_label)

                _SAMPLE_INFO_QUEUE.put((image_path, encoded_label[0]))
            except IndexError:
                log.error('Lexicon doesn\'t contain lexicon index {:d}'.format(lexicon_index))
                continue
        for i in range(self._writer_process_nums):
            _SAMPLE_INFO_QUEUE.put(_SENTINEL)
        log.debug('Complete filling dataset sample information queue[current size: {:d}], cost time: {:.5f}s'.format(
            _SAMPLE_INFO_QUEUE.qsize(),
            time.time() - t_start
        ))
Example #3
def load_idmap(idmap_filepath):
    """Load idmap
    
    Each row corresponds to an int_id, label_str pairing, tab-separated    
    
    Args:
        idmap_filepath (str): path to idmap
    
    Raises:
        FileNotFoundError: if idmap does not exist
        ValueError: Problems parsing 
    """
    idmap = {}
    if not os.path.exists(idmap_filepath):
        raise FileNotFoundError("{} not found.".format(idmap_filepath))
    try:
        with open(idmap_filepath, "r") as rf:
            for row_in in rf:
                try:
                    row = row_in.split("\t")
                    assert len(row) >= 2
                except:
                    row = row_in.split(" ")
                    assert len(row) >= 2
                id = int(row[0])
                label = row[1].strip()
                idmap[label] = id
    except:
        log.error("Problems parsing " + idmap_filepath)
        raise ValueError("Problems parsing " + idmap_filepath)
    return idmap
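A quick usage sketch (not from the original project) showing the expected file layout, assuming load_idmap above is importable and its logger is configured; the labels here are made up. Note the returned dict is keyed by label, not by id.

import os
import tempfile

# Each row is "<int_id><TAB><label_str>", as described in the docstring above.
with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
    tmp.write("0\tcat\n1\tdog\n")
    idmap_path = tmp.name

idmap = load_idmap(idmap_path)   # expected: {'cat': 0, 'dog': 1}
os.remove(idmap_path)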
Example #4
File: gnet.py Project: cnsoft/GGSvr
def on_data(connection_id, buff):
    global MSG_MAP

    try:
        msg = msgpack.unpackb(buff)
    except Exception as e:
        glog.error("gnet>ERROR message format")
        raise e

    # Force-convert the message into a list
    msg = list(msg)
    glog.log("gnet>[recv] %s" % str(msg))
    msgid = msg[0]

    if msgid not in MSG_MAP:
        glog.error("gnet>on_data msgid not in MSG_MAP|msgid:%s" % str(msgid))
        return

    # Call the handler registered for this msgid
    func = MSG_MAP[msgid]
    # msg[0] was the msgid, which the handler does not need, so replace it with connection_id
    msg[0] = connection_id

    try:
        func(msg) 
    except Exception as e:
        traceback.print_exc()
Example #5
def imshow(im):
    """
    This function is auto version of the plt.imshow.
    It will auto choose the imshow mode and max min
    val
    NOTE: THe im can be a file name, in this case, the function will
    read the image first and then disply
    """
    plt.ion()
    # If the im is a file name
    if type(im) == str:
        im = Image.open(im)
    im = np.array(im)
    cmap = None
    if im.ndim == 2:
        cmap = plt.get_cmap('gray')
    elif im.ndim == 3 and im.shape[2] == 1:
        im = im.reshape((im.shape[0], im.shape[1]))
        cmap = plt.get_cmap('gray')
    elif im.ndim >= 4:
        log.error('Can not display the image, shape %s' % str(im.shape))
        return None
    # If the im is not uint8 image, normalize it to 0~1
    if im.dtype != np.uint8:
        im = imnormalize(im)
    # disp the image
    plt.imshow(im, interpolation='none', cmap=cmap)
    plt.show()
    plt.pause(0.1)
    plt.ioff()
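Hypothetical calls to imshow above; 'example.jpg' is a placeholder path, and imnormalize is assumed to come from the same module as the function.

import numpy as np

imshow('example.jpg')            # a file path: the image is read, then displayed
imshow(np.random.rand(64, 64))   # a 2-D float array: shown with a gray colormap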
Example #6
def load_mat_list(mat_file, is_switch_HW, *var_name_list):
    """
    Different from the load_mat, this function can extract multi
    vars from one mat file
    =================================================================
    This function can load the mat file saved by matlab, and extract
    the matrix var from it.
    A numpy array will be returned.
    NOTE1:
        Since the height and width storage method is different between
        numpy and matlab, if is_switch_HW set to True, the H and W
        dimension will be switched.
    NOTE2:
        Since this function use h5py lib as interface. It ONLY support
        v7.3 format mat file
        HOWEVER, the HW switch only will be performs if the dim of mat
        between 2 to 4
    NOTE3:
        If error, return None
    """
    try:
        matfile = h5py.File(mat_file, 'r')
    except IOError as e:
        log.error('Can not open mat file: %s\n' % e)
        return None
Example #7
def imread(filename,
           dtype=np.float32,
           sfactor=1.0,
           image_type='rgb',
           flip=False):
    if exists(filename):
        image = cv2.imread(filename)
        if image_type == 'gray':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        elif image_type == 'rgb':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        else:
            glog.error('Unknown format')

        if dtype == np.float32 or dtype == np.float64:
            image = image.astype(dtype)
            image /= 255.

        if sfactor != 1.0:
            image = cv2.resize(image, None, fx=sfactor, fy=sfactor)

        if flip:
            image = image[:, ::-1, :]
    else:
        glog.error('File {0} not found'.format(filename))
        image = np.array([-1])

    return image
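A hedged usage sketch for imread above; 'frame.png' is a placeholder path, and glog is assumed to be imported as in the snippet.

import numpy as np

img = imread('frame.png', dtype=np.float32, sfactor=0.5, image_type='gray')
if img.size == 1 and img[0] == -1:
    glog.error('frame.png could not be loaded')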
Example #8
def test_init():
    global cc

    server_name = "TestClassifier"
    version = "0.0.0"
    # net_data_dir = "/tmp/test_caffe_classifier/net_data"
    # prop_type = "label"
    # prop_id_map = {}
    # module_id_map
    if os.path.exists(LOCAL_NET_DATA_DIR):
        try:
            cc = CaffeClassifier(server_name, version, LOCAL_NET_DATA_DIR)
            return
        except Exception as e:
            log.error(e)
            pass
    else:
        os.makedirs(LOCAL_NET_DATA_DIR)

    for key, net_data_bytes in generate_fileobj_from_s3_folder(
            S3_BUCKET_NAME, S3_NET_DATA_FOLDER):
        filename = os.path.basename(key)
        print("{}/{}".format(LOCAL_NET_DATA_DIR, filename))
        with open("{}/{}".format(LOCAL_NET_DATA_DIR, filename), "wb") as file:
            file.write(net_data_bytes.getvalue())

    cc = CaffeClassifier(server_name, version, LOCAL_NET_DATA_DIR)
Example #9
    def _embedding_feats_dbscan_cluster(self, embedding_image_feats):
        """
        dbscan cluster
        :param embedding_image_feats:
        :return:
        """
        db = DBSCAN(eps=self._cfg.POSTPROCESS.DBSCAN_EPS,
                    min_samples=self._cfg.POSTPROCESS.DBSCAN_MIN_SAMPLES)
        try:
            features = StandardScaler().fit_transform(embedding_image_feats)
            db.fit(features)
        except Exception as err:
            log.error(err)
            ret = {
                'origin_features': None,
                'cluster_nums': 0,
                'db_labels': None,
                'unique_labels': None,
                'cluster_center': None
            }
            return ret
        db_labels = db.labels_
        unique_labels = np.unique(db_labels)
        num_clusters = len(unique_labels)
        cluster_centers = db.components_

        ret = {
            'origin_features': features,
            'cluster_nums': num_clusters,
            'db_labels': db_labels,
            'unique_labels': unique_labels,
            'cluster_center': cluster_centers
        }

        return ret
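The same scaling-plus-DBSCAN step as a standalone sketch on synthetic data; the eps and min_samples values are arbitrary stand-ins for the self._cfg.POSTPROCESS settings used above.

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler

embedding_image_feats = np.random.rand(200, 4)        # stand-in for lane embeddings
features = StandardScaler().fit_transform(embedding_image_feats)
db = DBSCAN(eps=0.35, min_samples=10).fit(features)
print(len(np.unique(db.labels_)), 'cluster labels (noise is labelled -1)')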
Example #10
    def load_image_file(filename):
        try:
            image_pil = PIL.Image.open(filename)
        except IOError:
            log.error('Failed opening image file: {}'.format(filename))
            return
        # copy same white image to map
        img_w, img_h = image_pil.size
        if img_w > img_h:
            background = PIL.Image.new('RGB', (img_w, img_h * 2),
                                       (255, 255, 255))
            alter = (img_w, img_h, 0, 1)
        else:
            background = PIL.Image.new('RGB', (img_w * 2, img_h),
                                       (255, 255, 255))
            alter = (img_w, img_h, 1, 0)
        offset = (0, 0)
        background.paste(image_pil, offset)
        image_pil = background
        # apply orientation to image according to exif
        image_pil = utils.apply_exif_orientation(image_pil)

        with io.BytesIO() as f:
            ext = osp.splitext(filename)[1].lower()
            if PY2 and QT4:
                format = 'PNG'
            elif ext in ['.jpg', '.jpeg']:
                format = 'JPEG'
            else:
                format = 'PNG'
            image_pil.save(f, format=format)
            f.seek(0)
            return f.read(), alter
Example #11
    def _ncut_seg(self, data_list):
        """
        Use the ncut method to segment the image
        [image, slic_label/[slic_param], [ncut_param]]
        """
        img = data_list[0]
        param = data_list[1]
        param_cut = data_list[2]
        if param_cut is None:
            threshold = 0.001
        else:
            threshold = param_cut[0]
        # Check if the param is the super pixel label or the num of super pixel
        # to be segmented
        try:
            num = int(param[0])
            # super pixel seg
            label1 = segmentation.slic(img,
                                       compactness=10,
                                       n_segments=num,
                                       slic_zero=True)
        except:
            label1 = param
        # N-Cut
        g = graph.rag_mean_color(img, label1, mode='similarity')
        try:
            label2 = graph.cut_normalized(label1, g, thresh=threshold)
        except:
            log.error('\033[01;31mERROR\033[0m: Unknown error in '
                      'cut_normalized function.')
            label2 = np.zeros(label1.shape).astype('int')
        return label2
Example #12
    def place_vm(self, req: VMEvent, current_server: Server):
        """ Place the VM on the give server. Also do CPU and RAM bookkeeping.
        :req: properties of to-be-allocated VM
        :current_server: a potential server for to-be-allocated VM
        :returns: status of the allocation. It is always const.SCHED_SUCCESS
        since current_server was already checked to have sufficient resources
        to accommodate the VM. """
        self.working_set_vms[req.vm_uuid] = (current_server, req)
        if current_server.allocate_cores(req.cores) != const.SCHED_SUCCESS:
            glog.error(
                'ERROR: failed to deduct {} cores from server {}'.format(
                    req.cores, current_server))
            sys.exit(1)
        if current_server.allocate_ram(req.ram) != const.SCHED_SUCCESS:
            glog.error('ERROR: failed to deduct {} RAM from server {}'.format(
                req.ram, current_server))
            sys.exit(1)

        # add the scheduled VM to the output
        end_time = time.process_time()
        timedelta = self.timedelta_obj.diff_in_microsecs(
            self.start_time, end_time)
        alloc = dict(server_id=current_server.id, vm=req, timedelta=timedelta)
        self.allocations.append(alloc)

        if self.debug:
            self.debug_stats_obj.stat['vm_uuid'] = req.vm_uuid

        #glog.debug('successfully allocated VM: {}'.format(req))
        return const.SCHED_SUCCESS
Example #13
def get_image(filename):
    res_code = 400
    apires = {
        'success': False,
        'status_code': res_code,
        'image_name': filename
    }
    ipm_image_path = os.path.join(ipm_image_save_path, filename)
    try:
        if os.path.exists(ipm_image_path):
            res = send_from_directory(ipm_image_save_path, filename)
        else:
            res_code = 400
            apires['status_code'] = res_code
            apires['description'] = 'File Not Found'
            res = Response(json.dumps(apires),
                           status=res_code,
                           mimetype='application/json')
            log.info("File Not Found")
    except Exception as e:
        res_code = 500
        apires['status_code'] = res_code
        apires['description'] = 'Internal Error'
        log.error("Exception in retriving image file", exc_info=True)
        res = Response(json.dumps(apires),
                       status=res_code,
                       mimetype='application/json')

    return res
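One possible way to expose get_image above, assuming a Flask app object; the route path is a placeholder, and the module is assumed to already define ipm_image_save_path and the Flask imports the view uses.

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/ipm_images/<filename>', 'get_image', get_image, methods=['GET'])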
Example #14
    def run_dataset(self, dataset):
        """ Run a single dataset from an experiments file and save all output. This is done
            for every pipeline requested for the dataset.

            Args:
                dataset: a dataset to run as defined in the experiments yaml file.

            Returns: True if all pipelines for the dataset succeed, False otherwise.
        """
        dataset_name = dataset['name']

        has_a_pipeline_failed = False
        pipelines_to_run_list = dataset['pipelines']
        if len(pipelines_to_run_list) == 0:
            log.warning("Not running pipeline...")
        for pipeline_type in pipelines_to_run_list:
            # TODO shouldn't this break when a pipeline has failed? Not necessarily
            # if we want to plot all pipelines except the failing ones.
            evt.print_green("Run pipeline: %s" % pipeline_type)
            pipeline_success = self.__run_vio(dataset, pipeline_type)
            if pipeline_success:
                evt.print_green("Successful pipeline run.")
            else:
                log.error("Failed pipeline run!")
                has_a_pipeline_failed = True

        if not has_a_pipeline_failed:
            evt.print_green("All pipeline runs were successful.")
        evt.print_green("Finished evaluation for dataset: " + dataset_name)
        return not has_a_pipeline_failed
Example #15
def run_py_cmd(python_version,
               python_name,
               log_dir="./",
               log_name="./py.log",
               **kwargs):

    glog.info("start python : " + python_name)
    cmd = python_version + " " + python_name

    tmp = " "
    for key in kwargs:
        tmp = tmp + "--" + key + "=" + str(kwargs[key]) + " "

    log_name = os.path.join(log_dir, log_name)

    cmd_pearl = cmd + tmp
    cmd = cmd + tmp + " > " + log_name
    result = (1, '')  # default in case the command cannot be started
    try:
        glog.info("run cmd : " + cmd)
        result = subprocess.getstatusoutput(cmd)
    except OSError:
        glog.error("run failed: the command raised an exception.")

    if int(result[0]) != 0:
        glog.error(" run failed : " + cmd_pearl + " , result is : " +
                   result[1])
        sys.exit()
        return False
    else:
        glog.info(" run over   : " + cmd_pearl)
        return True
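A hypothetical call to run_py_cmd above; the interpreter, script name, flags and log paths are placeholders, and ./logs is assumed to exist so the shell redirect succeeds.

ok = run_py_cmd('python3', 'train.py',
                log_dir='./logs', log_name='train.log',
                epochs=10, lr=0.001)
# runs: python3 train.py --epochs=10 --lr=0.001  > ./logs/train.log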
Example #16
def http_call(url, params={}, is_GET=False, http_header=None):
    # print "http_call:", url, params, is_GET
    import glog
    if (is_GET) and params:
        d = ""
        for (k, v) in params.iteritems():
            d += str(k) + "=" + str(v) + "&"
        url += "?" + d
    try:
        request = urllib2.Request(url)
        #http header
        if http_header:
            for k, v in http_header.iteritems():
                request.add_header(str(k), str(v))
        #enable cookie
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())

        if not is_GET:
            params = urllib.urlencode(params)
            response = opener.open(request, params, timeout=URLOPEN_TIMEOUT)
        else:
            response = opener.open(request, None, timeout=URLOPEN_TIMEOUT)
        result = response.read()
        # print "http_call result:", result
        return result
    except urllib2.HTTPError as e:
        glog.error("HTTPError:%s" % str(e.code))
Example #17
 def __init__(self, root):
     # Check whether the exif includes the rst node
     stat = root.attrib['stat']
     if stat is None:
         self.exif = root
     elif stat == 'ok':
         self.exif = root.find('photo')
     else:
         log.error('The stat is not OK')
         self.camera = None
         self.focal_length = None
         self.aperture = None
         self.exposure = None
         self.lens = None
         self.focal_in35 = None
         self.brand = None
         self.id = None
         return
     self.exifs = self.exif.findall('exif')
     self.camera = self.get_camera()
     self.focal_length = self.__get_focal_length()
     self.aperture = self.__get_aperture()
     self.exposure = self.__get_exposure()
     self.lens = self.get_lens()
     self.focal_in35 = self.__get_focal_in35()
     self.brand = self.get_brand()
     self.id = self.exif.get('id')
Example #18
def load_ie_model(model_xml, device, plugin_dir, cpu_extension=''):
    """Loads a model in the Inference Engine format"""
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
    if cpu_extension and 'CPU' in device:
        plugin.add_cpu_extension(cpu_extension)
    # Read IR
    log.info("Loading network files:\n\t%s\n\t%s", model_xml, model_bin)
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in plugin.device:
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if not_supported_layers:
            log.error("Following layers are not supported by the plugin for specified device %s:\n %s",
                      plugin.device, ', '.join(not_supported_layers))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Checker supports only single input topologies"
    assert len(net.outputs) == 1, "Checker supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    net.batch_size = 1

    # Loading model to the plugin
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net)
    model = IEModel(exec_net, net.inputs, input_blob, out_blob)
    del net
    return model
Example #19
    def _process_request(self, request):
        """Send request_message through all the modules

        Args:
            request (Request): incoming request

        Returns:
            Response: outgoing response message
        """
        if not isinstance(request, Request):
            raise ValueError(f"{request} is of type {type(request)},"
                             f" not {Request}")
        log.debug(f"Processing: {request}")

        response = Response(request, self.schema_registry_url)

        for module in self.modules:
            log.info(f"Processing {request} in module: {module}")
            try:
                response = module.process(response)
            except Exception as e:
                log.error(traceback.format_exc())
                log.error(f"Processing {request} in {module} FAILED. "
                          f"Setting error code to {Codes.ERROR_PROCESSING}.")
                module.code = Codes.ERROR_PROCESSING
                response = module.update_and_return_response()

            log.info(f"Processing {request} in module: {module}"
                     f" ...Status: {module.code}")
            if response is not None:
                log.debug("response.to_dict():"
                          f" {json.dumps(response.to_dict(), indent=2)}")
        return response
Example #20
def crlite_determine_publish(*, existing_records, run_db):
    assert len(run_db) > 0, "There must be run identifiers"

    # First, if there are no existing records, then we upload the most recent
    # run.
    if not existing_records:
        return {"clear_all": True, "upload": [run_db.most_recent_id()]}

    # Next, match the most recent filter-or-stash with its run identifier.
    # If we don't find any published run identifiers, just upload the most
    # recent run.
    published_run_ids = []
    unpublished_run_ids = []

    published_run_ids, unpublished_run_ids = run_db.split_run_ids_by(
        lambda run_id: run_id in existing_records[-1]["attachment"]["filename"]
    )

    if not published_run_ids:
        return {"clear_all": True, "upload": [run_db.most_recent_id()]}

    # verify sanity of the run_identifiers
    try:
        crlite_verify_run_id_sanity(
            run_db=run_db, identifiers_to_check=unpublished_run_ids
        )
    except SanityException as se:
        log.error(f"Failed to verify run ID sanity: {se}")
        return {"clear_all": True, "upload": [run_db.most_recent_id()]}

    return {"upload": unpublished_run_ids}
Example #21
def _create_dataset(input_dir, filenames, output_path):
  count = 0
  writer = tf.python_io.TFRecordWriter(output_path+'.cache')
  random.shuffle(filenames)
  for i, filename in enumerate(filenames):
    wave_path = input_dir + filename[0]
    txt_path = input_dir + filename[1]
    stem = os.path.splitext(os.path.split(filename[0])[-1])[0]
    wave = utils.read_wave(wave_path)
    text = utils.read_txt(txt_path)
    if len(wave) >= len(text):
      data = tf.train.Example(features=tf.train.Features(feature={
        'uid': tf.train.Feature(bytes_list=tf.train.BytesList(value=[stem.encode('utf-8')])),
        'audio/data': tf.train.Feature(float_list=tf.train.FloatList(value=wave.reshape([-1]).tolist())),
        'audio/shape': tf.train.Feature(int64_list=tf.train.Int64List(value=wave.shape)),
        'text': tf.train.Feature(int64_list=tf.train.Int64List(value=text)),
      }))
      writer.write(data.SerializeToString())
    else:
      glog.error("length of label(%d) is greater than feature(%d) at %s." % (len(text), len(wave), stem))

    count = i + 1
    if count % 50 == 0:
      glog.info('processed %d/%d files.' % (count, len(filenames)))
  if count % 1000 != 0:
    glog.info('processed %d/%d files.' % (count, len(filenames)))
  if os.path.exists(output_path):
    os.remove(output_path)
  if os.path.exists(output_path+'.cache'):
    os.renames(output_path+'.cache', output_path)
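A possible way to spot-check the records written above, using the same TF1-era API family (tf.python_io) as the writer; 'output.tfrecord' stands in for whatever output_path was used.

import tensorflow as tf

for record in tf.python_io.tf_record_iterator('output.tfrecord'):
    example = tf.train.Example.FromString(record)
    print(example.features.feature['uid'].bytes_list.value[0])
    print(example.features.feature['audio/shape'].int64_list.value)
    break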
Example #22
def on_data(connection_id, buff):
    global MSG_MAP

    try:
        msg = msgpack.unpackb(buff)
    except Exception as e:
        glog.error("gnet>ERROR message format")
        raise e

    # Force-convert the message into a list
    msg = list(msg)
    glog.log("gnet>[recv] %s" % str(msg))
    msgid = msg[0]

    if msgid not in MSG_MAP:
        glog.error("gnet>on_data msgid not in MSG_MAP|msgid:%s" % str(msgid))
        return

    # Call the handler registered for this msgid
    func = MSG_MAP[msgid]
    # msg[0] was the msgid, which the handler does not need, so replace it with connection_id
    msg[0] = connection_id

    try:
        func(msg)
    except Exception as e:
        traceback.print_exc()
Example #23
def set_relative_area_filter(evaluator, ground_truth, area_range=(0, np.inf)):
    """ Set relative area filter
    """
    for image_id, gt in enumerate(ground_truth):
        # FIXME. This is a workaround for images without GT objects (?) #pylint: disable=fixme
        if evaluator.dataset[image_id].ground_truth_boxes is None:
            continue
        gt_of_interest = np.ones(len(gt['objects']), dtype=np.bool)
        if 'image_size' not in gt:
            path = gt['image']
            image = image_provider.get_image(path)
            if image is None or np.any(
                    image.shape == 0) or len(image.shape) < 2:
                logging.error('Invalid image "{}"'.format(path))
                raise ValueError('Failed to load image')
            image_size = image.shape[:2]
            gt['image_size'] = image_size
        image_size = gt['image_size']
        for i, obj in enumerate(gt['objects']):
            bbox = obj['bbox']
            w = bbox[2] - bbox[0]
            h = bbox[3] - bbox[1]
            area = np.sqrt(float(w * h) / image_size[0] / image_size[1])
            gt_of_interest[i] = (area_range[0] <= area <= area_range[1]) and (
                not obj.get('difficult', False))
        evaluator.dataset[image_id].set_filter(np.asarray(gt_of_interest))
Example #24
    def execute_cmd(cmd_name):
        """Run ros command by sending GRPC requests and return HTTP response."""
        ToolStatus = runtime_status_pb2.ToolStatus
        channel = grpc.insecure_channel(gflags.FLAGS.hmi_ros_node_service)
        stub = ros_node_pb2.HMIRosNodeStub(channel)

        response = None
        status = runtime_status.RuntimeStatus
        if cmd_name == 'reset':
            request = ros_node_pb2.ChangeDrivingModeRequest(
                action=ros_node_pb2.ChangeDrivingModeRequest.RESET_TO_MANUAL)
            response = stub.ChangeDrivingMode(request)

            # Update runtime status.
            tool_status = status.get_tools()
            if tool_status.playing_status != ToolStatus.PLAYING_NOT_READY:
                tool_status.playing_status = ToolStatus.PLAYING_READY_TO_CHECK

        elif cmd_name == 'start_auto_driving':
            request = ros_node_pb2.ChangeDrivingModeRequest(
                action=ros_node_pb2.ChangeDrivingModeRequest.START_TO_AUTO)
            response = stub.ChangeDrivingMode(request)

            # Update runtime status.
            status.get_tools().playing_status = ToolStatus.PLAYING
        else:
            error_msg = 'RosServiceApi: Unknown command "{}"'.format(cmd_name)
            glog.error(error_msg)
            return error_msg, httplib.BAD_REQUEST

        status.broadcast_status_if_changed()
        glog.info('Processed command "{}", and get response:{}'.format(
            cmd_name, response))
        return 'OK', httplib.OK
Example #25
    def write(self, doc, file, serialize=True, indent=None):
        """Write Avro doc 

        Args:
            doc (dict): dict of the avro document
            file (str): path of the avro binary output file
            serialize (bool): whether to serialize avro doc to a binary file
            indent (int): if serialize=False, write json with indentation=indent
        
        Returns:
            bool: True if successfully wrote file
        """
        if serialize:
            try:
                bytes = self.encode(doc)
                with open(file, "wb") as wf:
                    wf.write(bytes)
            except avro.io.AvroTypeException:
                log.error(
                    "avro.io.AvroTypeException: the datum is not an example of the schema"
                )
                return False
            log.info("Encoded doc to file: {}".format(file))
        else:
            if not self.is_valid_avro_doc(doc):
                log.error("datum is not an example of schema")
                return False
            with open(file, "w") as wf:
                json.dump(doc, wf, indent=indent)
        return True
Example #26
    def init_config(self, config_file: str) -> None:
        # Load configuration file from disk
        config_file_path = Path(config_file)
        log.check(
            Path(config_file).exists(),
            message=f'Config file "{config_file_path}" doesn\'t exist',
        )
        with config_file_path.open() as config_file_fh:
            try:
                config_raw = yaml.safe_load(config_file_fh)
            except yaml.YAMLError as exc:
                log.error(
                    f'Configuration file "{config_file_path}" parse error\n{exc}'
                )
                return

        # Set the config file we loaded
        self.config.config_file = str(config_file_path)
        # Load config from file
        for key, val in config_raw.items():
            # Don't load non_file_fields
            if key in self.config.non_file_fields:
                continue
            try:
                getattr(self.config, key)
            except AttributeError:
                # Warn when rubbish config is loaded
                log.warn(f'Loaded unknown configuration "{key}"')
            setattr(self.config, key, val)
        print("Finished Loading Config")
        return
Example #27
    def add_to_kinto(self, *, rw_client=None):
        if self.pemData is None:
            raise IntermediateRecordError(
                "Cannot upload a record not local: {}".format(self)
            )

        attributes = self._get_attributes(new=True)

        perms = {"read": ["system.Everyone"]}
        record = rw_client.create_record(
            collection=settings.KINTO_INTERMEDIATES_COLLECTION,
            data=attributes,
            permissions=perms,
        )
        self.kinto_id = record["data"]["id"]

        try:
            self._upload_pem(rw_client=rw_client)
        except KintoException as ke:
            log.error(
                "Failed to upload attachment. Removing stale intermediate record {}.".format(
                    self.kinto_id
                )
            )
            rw_client.delete_record(
                collection=settings.KINTO_INTERMEDIATES_COLLECTION,
                id=self.kinto_id,
            )
            log.error("Stale record deleted.")
            raise ke
Example #28
def read_vatic(fpath):
    annotations = {}
    with open(fpath) as fp:
        lines = fp.readlines()
        for line in lines:
            parts = line.split(" ")
            frame = {
                "box":
                [int(parts[2]),
                 int(parts[1]),
                 int(parts[4]),
                 int(parts[3])],
                "attribute": "",
                "visible": (int(parts[6]) == 0)
            }
            if len(parts) > 10:
                frame["attribute"] = parts[10].strip().strip('\"')
            if int(parts[0]) not in annotations:
                annotations[int(parts[0])] = {
                    "frames": {
                        int(parts[5]): frame
                    },
                    "label": parts[9].strip().strip('\"')
                }
            else:
                if parts[9].strip().strip('\"') != annotations[int(
                        parts[0])]["label"]:
                    log.error("An illegal track")
                annotations[int(parts[0])]["frames"][int(parts[5])] = frame
    fp.close()
    return annotations
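A minimal sketch of the whitespace-separated line layout read_vatic above appears to expect; the column meanings are inferred from the indexing (track_id, xmin, ymin, xmax, ymax, frame, lost, occluded, generated, "label", optional "attribute"), and the file name and values are made up.

with open('tiny_vatic.txt', 'w') as fp:
    fp.write('0 10 20 50 80 0 0 0 0 "person" "walking"\n')

print(read_vatic('tiny_vatic.txt'))
# {0: {'frames': {0: {'box': [20, 10, 80, 50], 'attribute': 'walking',
#                     'visible': True}}, 'label': 'person'}}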
Example #29
    def write(self, data, cfg_name):
        log.info(f"cfg::write : write config to file: {cfg_name} - start")
        # 1 read template
        try:
            template = self.j2_env.get_template(general_params.template)
        except jinja2.TemplateNotFound as nf:
            log.error(
                f"cfg::write : failed to write: Template {general_params.template} not found"
            )
            return
        except Exception as e:
            log.error(
                f"cfg::write : failed to write: Template {general_params.template} general error: {e}"
            )
            return

        # 2 add data-specific rows
        render_data = {
            'hostname': data['hostname'],
            'data_specific_part1': self.__generate_specific1(data),
            'data_specific_part2': self.__generate_specific2(data),
            'data_specific_part3': self.__generate_specific3(data),
        }

        content = template.render(**render_data)
        with open(os.path.join(self.out_dir, cfg_name), 'w',
                  encoding='utf-8') as cfg_file:
            cfg_file.writelines(content)

        log.info(f"cfg::write : successfully")
Example #30
    def post(self):
        """
        Clients report runtime status.

        The client must PUT a json object which can be converted to
        runtime_status_pb2.RuntimeStatus.
        """
        try:
            new_status = json_format.ParseDict(
                flask.request.get_json(), runtime_status_pb2.RuntimeStatus())
        except Exception as e:
            error_msg = 'RuntimeStatusApi: Cannot parse given data "{}"'.format(
                flask.request.get_json())
            glog.error(error_msg)
            return error_msg, httplib.BAD_REQUEST

        # Merge status.
        glog.info('RuntimeStatusApi: Put status:\n{}'.format(new_status))
        cur_status = runtime_status.RuntimeStatus
        for module in new_status.modules:
            cur_status.get_module(module.name).MergeFrom(module)
        for hardware in new_status.hardware:
            cur_status.get_hardware(hardware.name).MergeFrom(hardware)
        cur_status.get_tools().MergeFrom(new_status.tools)

        cur_status.broadcast_status_if_changed()
        return 'OK', httplib.OK
Example #31
def calc_surface_normal_map(dep, focal, radius, resize=None, mask=None):
    """
    This function calc the surface normal map according to the depth map
    focal:      The pixel wise virtual focal length
    radius:     The radius to calc the surface normal
    resize:     [height, width] the resolution of the output surface normal map
                which will save computation time
    mask:       If set, only true pixel should be used to calc normal value
    """
    if resize is None:
        normal = np.zeros([dep.shape[0], dep.shape[1], 3])
    else:
        if len(resize) != 2:
            log.error('ERROR: The resize parameter should be [height, width]')
            return None
        normal = np.zeros([resize[0], resize[1], 3])
    if mask is not None:
        if mask.shape[0] != dep.shape[0] or mask.shape[1] != dep.shape[1]:
            log.error('ERROR: The size of the mask does not match the size '
                      'of the dep: %s vs %s' % (str(mask.shape), str(dep.shape)))
            return None

    # Generate the kernel
    kernel = _gen_kernel(radius)
    # Iter the pixels of the surface normal map
    for h in range(normal.shape[0]):
        _gen_row(dep, normal, mask, h, kernel, focal, resize)

    return normal
Example #32
    def push(self, responses, dst_url=None):
        """Pushes responses to a destination URL via the requests lib

        Args:
            responses (List[Response]): list of Response objects
            dst_url (str): url for POST endpoint
        """
        if not isinstance(responses, list):
            responses = [responses]

        log.debug(f"Pushing {len(responses)} items")
        for res in responses:
            assert isinstance(res, Response)
            if dst_url is None:
                dst_url = res.request.dst_url
            if dst_url:
                log.info(f"Pushing to {dst_url}")
                data = res.data
                log.info(f"data is of type {type(data)}")
                try:
                    ret = sender.post(dst_url,
                                      headers=self.auth_header,
                                      data=data)
                    log.info(f"requests.post(...) response: {ret}")
                except Exception as e:
                    log.error(e)
                    log.error(traceback.format_exc())
            else:
                log.info("No dst_url field in request. Not pushing response.")
Example #33
def imagesc(matrix, points=None, ax=None, cmap='jet', grid=True, show_axis=True, vmin=None, vmax=None):

    if len(matrix.shape) > 2:
        glog.error('Input has 3 dimensions, maybe use imshow?')
    else:
        show = False
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
            show = True

        if points is not None:
            ax.plot(points[:, 0], points[:, 1], 'c.')

        if vmin is None:
            vmin = np.min(matrix)

        if vmax is None:
            vmax = np.max(matrix)

        ax.imshow(matrix, interpolation='nearest', cmap=cmap, vmin=vmin, vmax=vmax)
        if grid:
            ax.grid('on')
        if not show_axis:
            ax.axis('off')
        if show:
            plt.show()
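A hypothetical call to imagesc above: display a random 2-D matrix and overlay a couple of points; the data is made up.

import numpy as np

matrix = np.random.rand(32, 32)
points = np.array([[5.0, 5.0], [20.0, 10.0]])        # (x, y) pairs to overlay
imagesc(matrix, points=points)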
Example #34
    def post(self, module_name):
        """Run module command and return HTTP response as (content, status)."""
        args = flask.request.form
        if args.get('execute_command'):
            return ModuleApi.execute_cmd(module_name, args['execute_command'])

        msg = 'Unknown query: {}'.format(args)
        glog.error(msg)
        return msg, httplib.BAD_REQUEST
Example #35
File: gnet.py Project: cnsoft/GGSvr
def sends(sub_svr_name, sub_svr_id, data):
    global SUB_SERVER_MAP, is_sub_server
    glog.log("gnet>[sends] (%s : %d) %s" % (sub_svr_name, sub_svr_id, str(data)))

    if is_sub_server:
        glog.error("gnet>can NOT do sends()")
        return

    trans.send(SUB_SERVER_MAP[sub_svr_name][sub_svr_id], _pack(data))
Example #36
File: gnet.py Project: cnsoft/GGSvr
def sendm(data):
    global is_sub_server
    glog.log("gnet>[sendm] %s" % str(data))

    if not is_sub_server:
        glog.error("gnet>can NOT do sendm()")
        return

    trans_sub.send(_pack(data))
Example #37
 def report_status(status):
     """Report status to HMI."""
     json_dict = json_format.MessageToDict(status, False, True)
     try:
         req = requests.post(
             gflags.FLAGS.hmi_runtime_status_api, json=json_dict)
         glog.info('Put RuntimeStatus: {}'.format(req.json()))
     except Exception as e:
         glog.error('Failed to put RuntimeStatus: {}'.format(e))
Example #38
    def post(self, hardware_name):
        """Query on hardware."""
        args = flask.request.form
        if args.get('execute_command'):
            return HardwareApi.execute_cmd(hardware_name,
                                           args['execute_command'])

        msg = 'Unknown query: {}'.format(args)
        glog.error(msg)
        return msg, httplib.BAD_REQUEST
Example #39
File: gnet.py Project: cnsoft/GGSvr
def _pack(data):
    buff = msgpack.packb(data)
    length = len(buff)

    if length > NET_MESSAGE_MAX_LEN:
        glog.error("gnet>_pack:data length is OVER")
        return

    len_buff = struct.pack('I', length)
    return len_buff + buff
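A possible receiving-side counterpart to _pack above (not from the original project): read the 4-byte length prefix, then decode the msgpack payload that follows it.

import struct
import msgpack


def _unpack(framed):
    """Inverse of _pack: strip the length prefix and decode the payload."""
    (length,) = struct.unpack('I', framed[:4])
    return msgpack.unpackb(framed[4:4 + length])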
Example #40
 def ParseMeta(self):
     """
     Parse meta info which doesn't need to scan the record.
     Currently we parse the record ID, header and channel list here.
     """
     self.record.header.ParseFromString(self._reader.get_headerstring())
     for chan in self._reader.get_channellist():
         self.record.channels[chan] = self._reader.get_messagenumber(chan)
     if len(self.record.channels) == 0:
         glog.error('No message found in record')
         return False
     return True
Example #41
    def report_hardware_status(hardware_status_list):
        """Report hardware status to HMI."""
        status_pb = runtime_status_pb2.RuntimeStatus()
        for hardware_status in hardware_status_list:
            status_pb.hardware.add().MergeFrom(hardware_status)

        json_dict = json_format.MessageToDict(status_pb, False, True)
        try:
            req = requests.post(
                gflags.FLAGS.hmi_runtime_status_api, json=json_dict)
            glog.info('Put HardwareStatus: {}'.format(req.json()))
        except Exception as e:
            glog.error('Failed to put HardwareStatus: {}'.format(e))
Example #42
    def Import(record_file):
        """Import one record."""
        parser = RecordParser(record_file)
        if not parser.ParseMeta():
            glog.error('Fail to parse record {}'.format(record_file))
            return

        parser.ParseMessages()
        doc = Mongo.pb_to_doc(parser.record)

        collection = Mongo.collection(gflags.FLAGS.mongo_collection_name)
        collection.replace_one({'path': parser.record.path}, doc, upsert=True)
        glog.info('Imported record {}'.format(record_file))
Example #43
def main():
    pb = easyprogressbar.EasyProgressBar()
    pb.start()
    for i in range(100):
        print i
        pb.update(i)
    pb.finish()
    try:
        tt()
    except ConnectionError as e:
        print e
        log.error("Error: %s" % e)
    print "finish"
Example #44
  def Update(self, accelerometer_x, accelerometer_y, accelerometer_z, gyro_x):
    acceleration = math.sqrt(
        accelerometer_x**2 + accelerometer_y**2 + accelerometer_z**2)
    if acceleration < 0.9 or acceleration > 1.1:
      glog.error('bad total acceleration %f' % acceleration)
      # TODO(Brian): Tune this?
      return
    accelerometer_angle = math.atan2(accelerometer_x, accelerometer_z)
    Z = numpy.matrix([[accelerometer_angle], [gyro_x]])

    Y = Z - self.H * self.X_hat
    S = self.H * self.P * self.H.transpose() + self.R
    K = self.P * self.H.transpose() * numpy.linalg.inv(S)
    self.X_hat += K * Y
    self.P = (numpy.identity(K.shape[0]) - K * self.H) * self.P
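Update() above is a standard Kalman measurement update; the standalone sketch below runs the same equations with made-up 2x2 matrices rather than the gains used by the original project.

import numpy

H = numpy.matrix(numpy.identity(2))
P = numpy.matrix(numpy.identity(2)) * 0.1            # state covariance (made up)
R = numpy.matrix(numpy.identity(2)) * 0.01           # measurement noise (made up)
X_hat = numpy.matrix([[0.0], [0.0]])                 # state estimate
Z = numpy.matrix([[0.05], [0.01]])                   # [accelerometer angle, gyro rate]

Y = Z - H * X_hat                                    # innovation
S = H * P * H.transpose() + R                        # innovation covariance
K = P * H.transpose() * numpy.linalg.inv(S)          # Kalman gain
X_hat += K * Y
P = (numpy.identity(K.shape[0]) - K * H) * P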
Example #45
def remote_call(sub_svr_name, sub_svr_id, func, args, kwds):
    global call_data
    if not gnet.is_sub_server:
        call_id = _get_next_id()
        async_result = gevent.event.AsyncResult()
        call_data[call_id] = async_result

        gnet.sends(sub_svr_name, sub_svr_id, [MSGID_REMOTE_CALL, call_id, func, args, kwds])
        # Wait for the remote result
        is_success, return_data = async_result.get(True, REMOTE_CALL_TIMEOUT)
        if is_success:
            glog.debug("remote_call>remote_call return:%s" % repr(return_data))
            return return_data
        else:
            glog.error("remote_call>remote_call return Exception:\n%s" % return_data)
Example #46
File: gnet.py Project: cnsoft/GGSvr
def on_sub_server_login(data):
    global SUB_SERVER_MAP
    
    connection_id = data[0]
    name = data[1]
    id = data[2]

    if name not in SUB_SERVER_MAP:
        glog.error("gnet>on_sub_server_login sub_server NOT def:%s" % name)
        return

    # Record this connection as a sub-server connection
    glog.info("gnet>sub server login success: (%s : %d)" % (name, id))
    SUB_SERVER_MAP[name][id] = connection_id
Example #47
    def execute_cmd(hardware_name, cmd_name):
        """Execute hardware command."""
        # Run command on all hardware if the name is 'all'.
        if hardware_name == 'all':
            for hw_conf in config.Config.get_pb().hardware:
                HardwareApi._run_command(hw_conf, cmd_name)
            runtime_status.RuntimeStatus.broadcast_status_if_changed()
            return 'OK', httplib.OK

        # Or else, run command on the specified hardware.
        conf = config.Config.get_hardware(hardware_name)
        if conf is None:
            msg = 'Cannot find config for hardware {}'.format(hardware_name)
            glog.error(msg)
            return msg, httplib.BAD_REQUEST
        result = HardwareApi._run_command(conf, cmd_name)
        runtime_status.RuntimeStatus.broadcast_status_if_changed()
        return result
Example #48
def open_db(lmdb_file):
    """Check if the lmdb file already exists, and ask whether delete it
    or keep add entries based on it.
    return the lmdb object
    """
    if os.path.exists(lmdb_file):
        print('%s is already exists.' % lmdb_file)
        k = raw_input('Do you want to delete it?[y/n]:')
        if k == 'y' or k == 'Y':
            log.warn('Delete the %s file' % lmdb_file)
            shutil.rmtree(lmdb_file)
        elif k == 'n' or k == 'N':
            log.warn('Keep the %s file, and new entries will be added' %
                     lmdb_file)
        else:
            log.error('Wrong key input, exit the program')
            sys.exit(2)

    db = lmdb.open(lmdb_file, map_size=int(1e12))
    return db
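A hypothetical follow-up once open_db above returns the environment: write one entry and read it back. The path, key and payload are placeholders.

db = open_db('./example_lmdb')
with db.begin(write=True) as txn:
    txn.put(b'image_000', b'\x00\x01\x02')
with db.begin() as txn:
    print(txn.get(b'image_000'))
db.close()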
Example #49
    def _run_command(conf, cmd_name):
        """Implementation of running command on hardware."""
        cmd = next((cmd for cmd in conf.supported_commands
                    if cmd.name == cmd_name), None)
        if cmd is None:
            msg = 'Cannot find command {} for hardware {}'.format(
                cmd_name, conf.name)
            glog.error(msg)
            return msg, httplib.BAD_REQUEST

        # Construct the command string by joining all components.
        cmd.command[0] = config.Config.get_realpath(cmd.command[0])
        cmd_str = ' '.join(cmd.command)
        system_cmd.run_in_background(cmd_str, cmd.stdout_file, cmd.stderr_file)

        # Update hardware status.
        hardware_status = runtime_status.RuntimeStatus.get_hardware(conf.name)
        if cmd_name == 'health_check':
            hardware_status.status = runtime_status_pb2.HardwareStatus.CHECKING

        return 'OK', httplib.OK
Example #50
    def _cluster(prediction, bandwidth):
        """
        Implements the clustering step from Section II of the paper
        :param prediction:
        :param bandwidth:
        :return:
        """
        ms = MeanShift(bandwidth, bin_seeding=True)
        # log.info('Start Mean Shift clustering ...')
        tic = time.time()
        try:
            ms.fit(prediction)
        except ValueError as err:
            log.error(err)
            return 0, [], []
        # log.info('Mean Shift took: {:.5f}s'.format(time.time() - tic))
        labels = ms.labels_
        cluster_centers = ms.cluster_centers_

        num_clusters = cluster_centers.shape[0]

        # log.info('Number of clusters: {:d}'.format(num_clusters))

        return num_clusters, labels, cluster_centers
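The same MeanShift step as a standalone sketch on synthetic pixel embeddings; the bandwidth value is arbitrary, not the one used by the original project.

import numpy as np
from sklearn.cluster import MeanShift

prediction = np.random.rand(300, 2)                  # stand-in for pixel embeddings
ms = MeanShift(bandwidth=0.3, bin_seeding=True)
ms.fit(prediction)
print(ms.cluster_centers_.shape[0], 'clusters found')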
Example #51
                if save_dir is not None:
                    mask_image = cv2.addWeighted(image_vis_list[index], 1.0, mask_image, 1.0, 0)
                    image_name = ops.split(image_path_epoch[index])[1]
                    image_save_path = ops.join(save_dir, image_name)
                    cv2.imwrite(image_save_path, mask_image)

            log.info('[Epoch:{:d}] Clustered lane lines on {:d} images, total time: {:.5f}s, average per image: {:.5f}s'.format(
                epoch, len(image_path_epoch), np.sum(cluster_time), np.mean(cluster_time)))

    sess.close()

    return


if __name__ == '__main__':
    # init args
    args = init_args()

    if args.save_dir is not None and not ops.exists(args.save_dir):
        log.error('{:s} does not exist and will be created'.format(args.save_dir))
        os.makedirs(args.save_dir)

    if args.is_batch.lower() == 'false':
        # test hnet model on single image
        test_lanenet(args.image_path, args.weights_path, args.use_gpu)
    else:
        # test hnet model on a batch of image
        test_lanenet_batch(image_dir=args.image_path, weights_path=args.weights_path,
                           save_dir=args.save_dir, use_gpu=args.use_gpu, batch_size=args.batch_size)
Example #52
def run_test(claw, initial_X, goal, max_separation_error=0.01, show_graph=True, iterations=200):
  """Runs the claw plant on a given claw (claw) with an initial condition (initial_X) and goal (goal).

    The tests themselves are not terribly sophisticated; I just test for 
    whether the goal has been reached and whether the separation goes
    outside of the initial and goal values by more than max_separation_error.
    Prints out something for a failure of either condition and returns
    False if tests fail.
    Args:
      claw: claw object to use.
      initial_X: starting state.
      goal: goal state.
      show_graph: Whether or not to display a graph showing the changing
           states and voltages.
      iterations: Number of timesteps to run the model for."""

  claw.X = initial_X

  # Various lists for graphing things.
  t = []
  x_bottom = []
  x_top = []
  u_bottom = []
  u_top = []
  x_separation = []

  tests_passed = True

  # Bounds which separation should not exceed.
  lower_bound = (initial_X[1, 0] if initial_X[1, 0] < goal[1, 0]
                 else goal[1, 0]) - max_separation_error
  upper_bound = (initial_X[1, 0] if initial_X[1, 0] > goal[1, 0]
                 else goal[1, 0]) + max_separation_error

  for i in xrange(iterations):
    U = claw.K * (goal - claw.X)
    U = ScaleU(claw, U, claw.K, goal - claw.X)
    claw.Update(U)

    if claw.X[1, 0] > upper_bound or claw.X[1, 0] < lower_bound:
      tests_passed = False
      glog.info('Claw separation was %f', claw.X[1, 0])
      glog.info("Should have been between", lower_bound, "and", upper_bound)

    if claw.hard_pos_limits and \
      (claw.X[0, 0] > claw.hard_pos_limits[1] or
          claw.X[0, 0] < claw.hard_pos_limits[0] or
          claw.X[0, 0] + claw.X[1, 0] > claw.hard_pos_limits[1] or
          claw.X[0, 0] + claw.X[1, 0] < claw.hard_pos_limits[0]):
      tests_passed = False
      glog.info('Claws at %f and %f', claw.X[0, 0], claw.X[0, 0] + claw.X[1, 0])
      glog.info("Both should be in %s, definitely %s",
                claw.pos_limits, claw.hard_pos_limits)

    t.append(i * claw.dt)
    x_bottom.append(claw.X[0, 0] * 10.0)
    x_top.append((claw.X[1, 0] + claw.X[0, 0]) * 10.0)
    u_bottom.append(U[0, 0])
    u_top.append(U[1, 0])
    x_separation.append(claw.X[1, 0] * 10.0)

  if show_graph:
    pylab.plot(t, x_bottom, label='x bottom * 10')
    pylab.plot(t, x_top, label='x top * 10')
    pylab.plot(t, u_bottom, label='u bottom')
    pylab.plot(t, u_top, label='u top')
    pylab.plot(t, x_separation, label='separation * 10')
    pylab.legend()
    pylab.show()

  # Test to make sure that we are near the goal.
  if numpy.max(abs(claw.X - goal)) > 1e-4:
    tests_passed = False
    glog.error('X was %s Expected %s', str(claw.X), str(goal))

  return tests_passed
Example #53
def try_log():
    log.debug('2333happy deubgging....')
    log.info('it works')
    log.warn('something not ideal')
    log.error('something went wrong')
    log.fatal('AAAAAAAAAAA!')
Example #54
import glog as log

log.info("It works.")
log.warn("Something not ideal")
log.error("Something went wrong")
log.fatal("AAAAAAAAAAAAAAA!")

log.check(False)
Example #55
def train_net(dataset_dir, weights_path=None, net_flag='vgg'):
    """

    :param dataset_dir:
    :param net_flag: choose which base network to use
    :param weights_path:
    :return:
    """
    train_dataset_file = ops.join(dataset_dir, 'train.txt')
    val_dataset_file = ops.join(dataset_dir, 'val.txt')

    assert ops.exists(train_dataset_file)

    train_dataset = lanenet_data_processor.DataSet(train_dataset_file)
    val_dataset = lanenet_data_processor.DataSet(val_dataset_file)

    with tf.device('/gpu:1'):
        input_tensor = tf.placeholder(dtype=tf.float32,
                                      shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                             CFG.TRAIN.IMG_WIDTH, 3],
                                      name='input_tensor')
        binary_label_tensor = tf.placeholder(dtype=tf.int64,
                                             shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                                    CFG.TRAIN.IMG_WIDTH, 1],
                                             name='binary_input_label')
        instance_label_tensor = tf.placeholder(dtype=tf.float32,
                                               shape=[CFG.TRAIN.BATCH_SIZE, CFG.TRAIN.IMG_HEIGHT,
                                                      CFG.TRAIN.IMG_WIDTH],
                                               name='instance_input_label')
        phase = tf.placeholder(dtype=tf.string, shape=None, name='net_phase')

        net = lanenet_merge_model.LaneNet(net_flag=net_flag, phase=phase)

        # calculate the loss
        compute_ret = net.compute_loss(input_tensor=input_tensor, binary_label=binary_label_tensor,
                                       instance_label=instance_label_tensor, name='lanenet_model')
        total_loss = compute_ret['total_loss']
        binary_seg_loss = compute_ret['binary_seg_loss']
        disc_loss = compute_ret['discriminative_loss']
        pix_embedding = compute_ret['instance_seg_logits']

        # calculate the accuracy
        out_logits = compute_ret['binary_seg_logits']
        out_logits = tf.nn.softmax(logits=out_logits)
        out_logits_out = tf.argmax(out_logits, axis=-1)
        out = tf.argmax(out_logits, axis=-1)
        out = tf.expand_dims(out, axis=-1)

        idx = tf.where(tf.equal(binary_label_tensor, 1))
        pix_cls_ret = tf.gather_nd(out, idx)
        accuracy = tf.count_nonzero(pix_cls_ret)
        accuracy = tf.divide(accuracy, tf.cast(tf.shape(pix_cls_ret)[0], tf.int64))

        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(CFG.TRAIN.LEARNING_RATE, global_step,
                                                   100000, 0.1, staircase=True)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = tf.train.MomentumOptimizer(
                learning_rate=learning_rate, momentum=0.9).minimize(loss=total_loss,
                                                                    var_list=tf.trainable_variables(),
                                                                    global_step=global_step)

    # Set tf saver
    saver = tf.train.Saver()
    model_save_dir = 'model/tusimple_lanenet'
    if not ops.exists(model_save_dir):
        os.makedirs(model_save_dir)
    train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    model_name = 'tusimple_lanenet_{:s}_{:s}.ckpt'.format(net_flag, str(train_start_time))
    model_save_path = ops.join(model_save_dir, model_name)

    # Set tf summary
    tboard_save_path = 'tboard/tusimple_lanenet/{:s}'.format(net_flag)
    if not ops.exists(tboard_save_path):
        os.makedirs(tboard_save_path)
    train_cost_scalar = tf.summary.scalar(name='train_cost', tensor=total_loss)
    val_cost_scalar = tf.summary.scalar(name='val_cost', tensor=total_loss)
    train_accuracy_scalar = tf.summary.scalar(name='train_accuracy', tensor=accuracy)
    val_accuracy_scalar = tf.summary.scalar(name='val_accuracy', tensor=accuracy)
    train_binary_seg_loss_scalar = tf.summary.scalar(name='train_binary_seg_loss', tensor=binary_seg_loss)
    val_binary_seg_loss_scalar = tf.summary.scalar(name='val_binary_seg_loss', tensor=binary_seg_loss)
    train_instance_seg_loss_scalar = tf.summary.scalar(name='train_instance_seg_loss', tensor=disc_loss)
    val_instance_seg_loss_scalar = tf.summary.scalar(name='val_instance_seg_loss', tensor=disc_loss)
    learning_rate_scalar = tf.summary.scalar(name='learning_rate', tensor=learning_rate)
    train_merge_summary_op = tf.summary.merge([train_accuracy_scalar, train_cost_scalar,
                                               learning_rate_scalar, train_binary_seg_loss_scalar,
                                               train_instance_seg_loss_scalar])
    val_merge_summary_op = tf.summary.merge([val_accuracy_scalar, val_cost_scalar,
                                             val_binary_seg_loss_scalar, val_instance_seg_loss_scalar])

    # Set sess configuration
    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
    sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
    sess_config.gpu_options.allocator_type = 'BFC'

    sess = tf.Session(config=sess_config)

    summary_writer = tf.summary.FileWriter(tboard_save_path)
    summary_writer.add_graph(sess.graph)

    # Set the training parameters
    train_epochs = CFG.TRAIN.EPOCHS

    log.info('Global configuration is as follows:')
    log.info(CFG)

    with sess.as_default():

        tf.train.write_graph(graph_or_graph_def=sess.graph, logdir='',
                             name='{:s}/lanenet_model.pb'.format(model_save_dir))

        if weights_path is None:
            log.info('Training from scratch')
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
            saver.restore(sess=sess, save_path=weights_path)

        # Load the pretrained VGG-16 parameters
        if net_flag == 'vgg' and weights_path is None:
            pretrained_weights = np.load(
                './data/vgg16.npy',
                encoding='latin1', allow_pickle=True).item()

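            # Map each trainable variable to a layer entry in vgg16.npy via the
            # third-from-last element of its scope name; variables without a
            # matching key (or with a mismatched shape) are skipped below.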
            for vv in tf.trainable_variables():
                weights_key = vv.name.split('/')[-3]
                try:
                    weights = pretrained_weights[weights_key][0]
                    _op = tf.assign(vv, weights)
                    sess.run(_op)
                except Exception:
                    # Skip variables that have no matching pretrained weight
                    continue

        train_cost_time_mean = []
        val_cost_time_mean = []
        for epoch in range(train_epochs):
            # training part
            t_start = time.time()

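            # Keep batch loading and the OpenCV/NumPy preprocessing pinned to
            # the CPU so the GPU is left free for the graph computation.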
            with tf.device('/cpu:0'):
                gt_imgs, binary_gt_labels, instance_gt_labels = train_dataset.next_batch(CFG.TRAIN.BATCH_SIZE)
                gt_imgs = [cv2.resize(tmp,
                                      dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                                      dst=tmp,
                                      interpolation=cv2.INTER_LINEAR)
                           for tmp in gt_imgs]

                gt_imgs = [tmp - VGG_MEAN for tmp in gt_imgs]
                binary_gt_labels = [cv2.resize(tmp,
                                               dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                                               dst=tmp,
                                               interpolation=cv2.INTER_NEAREST)
                                    for tmp in binary_gt_labels]
                binary_gt_labels = [np.expand_dims(tmp, axis=-1) for tmp in binary_gt_labels]
                instance_gt_labels = [cv2.resize(tmp,
                                                 dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                                                 dst=tmp,
                                                 interpolation=cv2.INTER_NEAREST)
                                      for tmp in instance_gt_labels]
            phase_train = 'train'

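            # One optimisation step; also fetch the losses, summaries and raw
            # network outputs so they can be logged and visualised below.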
            _, c, train_accuracy, train_summary, binary_loss, instance_loss, embedding, binary_seg_img = \
                sess.run([optimizer, total_loss,
                          accuracy,
                          train_merge_summary_op,
                          binary_seg_loss,
                          disc_loss,
                          pix_embedding,
                          out_logits_out],
                         feed_dict={input_tensor: gt_imgs,
                                    binary_label_tensor: binary_gt_labels,
                                    instance_label_tensor: instance_gt_labels,
                                    phase: phase_train})

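            # Abort training if any loss turns NaN, dumping the offending batch
            # (image and labels) to disk for inspection.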
            if math.isnan(c) or math.isnan(binary_loss) or math.isnan(instance_loss):
                log.error('cost is: {:.5f}'.format(c))
                log.error('binary cost is: {:.5f}'.format(binary_loss))
                log.error('instance cost is: {:.5f}'.format(instance_loss))
                cv2.imwrite('nan_image.png', gt_imgs[0] + VGG_MEAN)
                cv2.imwrite('nan_instance_label.png', instance_gt_labels[0])
                cv2.imwrite('nan_binary_label.png', binary_gt_labels[0] * 255)
                return

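            # Every 100 epochs dump the current batch together with the binary
            # segmentation result and a min-max scaled view of the embedding.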
            if epoch % 100 == 0:
                cv2.imwrite('image.png', gt_imgs[0] + VGG_MEAN)
                cv2.imwrite('binary_label.png', binary_gt_labels[0] * 255)
                cv2.imwrite('instance_label.png', instance_gt_labels[0])
                cv2.imwrite('binary_seg_img.png', binary_seg_img[0] * 255)

                for i in range(4):
                    embedding[0][:, :, i] = minmax_scale(embedding[0][:, :, i])
                embedding_image = np.array(embedding[0], np.uint8)
                cv2.imwrite('embedding.png', embedding_image)

            cost_time = time.time() - t_start
            train_cost_time_mean.append(cost_time)
            summary_writer.add_summary(summary=train_summary, global_step=epoch)

            # validation part
            with tf.device('/cpu:0'):
                gt_imgs_val, binary_gt_labels_val, instance_gt_labels_val \
                    = val_dataset.next_batch(CFG.TRAIN.VAL_BATCH_SIZE)
                gt_imgs_val = [cv2.resize(tmp,
                                          dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                                          dst=tmp,
                                          interpolation=cv2.INTER_LINEAR)
                               for tmp in gt_imgs_val]
                gt_imgs_val = [tmp - VGG_MEAN for tmp in gt_imgs_val]
                # Use nearest-neighbour interpolation so the binary labels stay binary
                binary_gt_labels_val = [cv2.resize(tmp,
                                                   dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                                                   dst=tmp,
                                                   interpolation=cv2.INTER_NEAREST)
                                        for tmp in binary_gt_labels_val]
                binary_gt_labels_val = [np.expand_dims(tmp, axis=-1) for tmp in binary_gt_labels_val]
                instance_gt_labels_val = [cv2.resize(tmp,
                                                     dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                                                     dst=tmp,
                                                     interpolation=cv2.INTER_NEAREST)
                                          for tmp in instance_gt_labels_val]
            phase_val = 'test'

            t_start_val = time.time()
            c_val, val_summary, val_accuracy, val_binary_seg_loss, val_instance_seg_loss = \
                sess.run([total_loss, val_merge_summary_op, accuracy, binary_seg_loss, disc_loss],
                         feed_dict={input_tensor: gt_imgs_val,
                                    binary_label_tensor: binary_gt_labels_val,
                                    instance_label_tensor: instance_gt_labels_val,
                                    phase: phase_val})

            if epoch % 100 == 0:
                cv2.imwrite('test_image.png', gt_imgs_val[0] + VGG_MEAN)

            summary_writer.add_summary(val_summary, global_step=epoch)

            cost_time_val = time.time() - t_start_val
            val_cost_time_mean.append(cost_time_val)

            if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
                log.info('Epoch: {:d} total_loss= {:6f} binary_seg_loss= {:6f} instance_seg_loss= {:6f} accuracy= {:6f}'
                         ' mean_cost_time= {:5f}s '.
                         format(epoch + 1, c, binary_loss, instance_loss, train_accuracy,
                                np.mean(train_cost_time_mean)))
                train_cost_time_mean.clear()

            if epoch % CFG.TRAIN.TEST_DISPLAY_STEP == 0:
                log.info('Epoch_Val: {:d} total_loss= {:6f} binary_seg_loss= {:6f} '
                         'instance_seg_loss= {:6f} accuracy= {:6f} '
                         'mean_cost_time= {:5f}s '.
                         format(epoch + 1, c_val, val_binary_seg_loss, val_instance_seg_loss, val_accuracy,
                                np.mean(val_cost_time_mean)))
                val_cost_time_mean.clear()

            if epoch % 2000 == 0:
                saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
    sess.close()

    return
Example #56
0
    def test_error(self):
        log.error('test')
Example #57
0
def main(argv):
    db_file = None
    skip_num = None
    data_path = '../data'
    overwrite = False
    help_msg = 'download_image.py -i <lmdbfile> -o[optional] <datapath> \
--overwrite[optional] --skip <num>\n\
-i <lmdbfile>       The input lmdb file containing the exif of the photos\n\
-o <datapath>       The path where the downloaded photos are stored\n\
--overwrite         If set, overwrite existing photos (default: do not)\n\
--skip <num>        Skip the first <num> photos'

    try:
        opts, args = getopt.getopt(argv, 'hi:o:', ['overwrite', 'skip='])
    except getopt.GetoptError:
        print(help_msg)
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print(help_msg)
            sys.exit()
        elif opt == '-i':
            db_file = arg
        elif opt == '-o':
            data_path = arg
        elif opt == '--overwrite':
            overwrite = True
        elif opt == '--skip':
            skip_num = int(arg)
        else:
            print(help_msg)
            sys.exit(2)

    if db_file is None:
        print(help_msg)
        sys.exit(2)

    # Try to open the database file
    db = lt.open_db_ro(db_file)
    if db is None:
        log.fatal('\033[0;31mCannot open %s\033[0m' % db_file)
        sys.exit(2)

    # Get the entries from the database
    entries = db.stat()['entries']
    # Entries counter
    counter = 0
    # Check the data path
    if not tb.check_path(data_path):
        log.info('Create new dir %s' % data_path)
    # Iter the data base
    if skip_num is not None:
        log.info('Skipping the first %d entries...' % skip_num)
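    # Walk the LMDB in a read-only transaction; each (key, val) pair holds a
    # photo key and its YAML-serialized metadata.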
    with db.begin(write=False) as txn:
        with txn.cursor() as cur:
            for key, val in cur:
                counter += 1
                if skip_num is not None and counter < skip_num:
                    continue
                # Parse the val into a dict
                val_dic = yaml.load(val, Loader=yaml.FullLoader)
                # Get the available url to download the photo
                photo = myxml.parse_xml_to_etree(val_dic['photo'])
                url = tb.get_url(photo, val_dic['urls'], True)
                # Download the url and save image
                log.info('Download %s from %s [%d/%d]' %
                         (key, url, counter, entries))
                try:
                    tb.download_url_and_save(url, key, overwrite, data_path)
                except Exception:
                    log.error('\033[0;31mFailed to download %s from %s\033[0m'
                              % (key, url))
                    continue

    db.close()
Example #58
0
def main(argv):
    config_file = 'flickr.xml'
    db_file = 'flickr_info_lmdb'
    db_file_trash = None
    helpmsg = """fetch_image_info.py -c <configfile> -o <lmdbfile>
 -t[optional] <trashfile>"""

    try:
        opts, args = getopt.getopt(argv, "hc:o:t:")
    except getopt.GetoptError:
        print(helpmsg)
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print(helpmsg)
            sys.exit()
        elif opt == '-c':
            config_file = arg
        elif opt == '-o':
            db_file = arg
        elif opt == '-t':
            db_file_trash = arg
        else:
            print(helpmsg)
            sys.exit(2)

    if db_file_trash is None:
        db_file_trash = db_file + '_trash'
    # Parse the xml config file
    config = myxml.xmlconfig(config_file)
    g_time_show_marker = time.time()
    g_time_show = int(config.time_show)

    # Create the lmdb database
    # Check if the lmdb file is already exist
    db = lt.open_db(db_file)
    db_trash = lt.open_db(db_file_trash)
    # Start to use flickrapi walk through the flickr server
    flickr = flickrapi.FlickrAPI(config.key, config.secret)

    time_start_num = float(config.time_start)
    time_end_num = float(config.time_end)
    time_current_num = time_start_num

    log.info('The start time is %s' % tb.unixtime_to_datearr(time_start_num))
    log.info('The end time is %s' % tb.unixtime_to_datearr(time_end_num))
    if config.time_dynamic:
        log.info('The timing mode is dynamic')
        time_interval_num = float(config.time_interval_init)
    else:
        log.info('The timing mode is fixed')
        time_interval_num = float(config.time_interval)

    log.info('Time interval is %d days %d hours %d secs' %
             tuple(tb.seconds_to_days(time_interval_num)))

    scenes_list = config.get_scenes_labels()
    lens_list = config.get_lens_labels()
    log.info('The scenes list is %s' % str(scenes_list))
    log.info('The lens list is %s' % str(lens_list))
    # Start to loop the data
    log.info('Start to fetch the photo info')

    # Globel counter
    # Count the total photos fetched
    g_photo_counter = 0
    # Count the photos which pass the screen procedure
    g_qualified_counter = 0

    db_size = 0
    db_trash_size = 0
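    # Sweep the upload timeline in windows of time_interval_num seconds,
    # querying every lens/scene label combination inside each window.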
    while time_current_num < time_end_num:
        start_time = str(time_current_num)
        end_time = str(time_current_num + time_interval_num)
        time_current_num += time_interval_num
        text_str = None
        extra_str = config.urls + ', ' + 'tags'
        # Counter in this time slice
        qualified_counter = 0
        photo_counter_max = 0
        # Loop the labels
        for lens_label in lens_list:
            for scenes_label in scenes_list:
                photo_counter = 0
                text_str = lens_label + ', ' + scenes_label
                log.info('\033[1;33mFetch date %s-%s, label: %s\033[0m' %
                         (tb.unixtime_to_datearr(start_time),
                          tb.unixtime_to_datearr(end_time),
                          text_str))
                log.info('\033[1;33mTime interval: %d days %d hours \
%d secs\033[0m' %
                         tuple(tb.seconds_to_days(time_interval_num)))
                log.info('\033[1;33mFetch Photos: %d, Qualified Photos: %d\
, Db Size: %d, Trash Size: %d\033[0m'
                         % (g_photo_counter, g_qualified_counter,
                            db_size, db_trash_size))
                # Search the photos according to the label
                # A list to store all the fetched photos
                for photo in flickr.walk(tag_mode='all',
                                         text=text_str,
                                         min_upload_date=start_time,
                                         max_upload_date=end_time,
                                         privacy_filter=1,
                                         content_type=1,
                                         extras=extra_str,
                                         per_page=int(config.page_size)):
                    # Show the overall info if X sec passed
                    if time.time() - g_time_show_marker > g_time_show:
                        g_time_show_marker = time.time()
                        log.info('\033[1;33mFetch date %s-%s, label: %s\033[0m'
                                 % (tb.unixtime_to_datearr(start_time),
                                    tb.unixtime_to_datearr(end_time),
                                    text_str))
                        log.info('\033[1;33mFetch Photos: %d, Qualified Photos:\
%d, Db Size: %d, Trash Size: %d\033[0m' % (g_photo_counter,
                                 g_qualified_counter, db_size, db_trash_size))
                    photo_counter += 1
                    g_photo_counter += 1
                    # Check the database if the photo is already been recorded
                    if lt.check_photo_id(db, photo.get('id')):
                        # Update the label if needed
                        lt.update_label(db, photo.get('id'),
                                        [scenes_label, lens_label])
                        continue
                    if lt.check_photo_id(db_trash, photo.get('id')):
                        continue
                    # Check the photo, and fetch the exif if needed
                    try:
                        rst = tb.get_exif(flickr, photo, config)
                    except ConnectionError as e:
                        log.error('\033[0;31mFetch Exif Error:\033[0m %s' % e)
                        continue

                    # If photo info and exif is invalid, return None
                    if rst is None:
                        continue
                    else:
                        exif = rst[0]
                        stat = rst[1]
                        if stat:
                            qualified_counter += 1
                            g_qualified_counter += 1
                            db_size = lt.write_db(db, exif, photo,
                                                  [scenes_label, lens_label],
                                                  config)
                        else:
                            db_trash_size = lt.write_db(db_trash, exif, photo,
                                                        text_str, config)
                # Record the max batch size
                if photo_counter_max < photo_counter:
                    photo_counter_max = photo_counter

        # Finish the data slice loop, re-adjust the time_interval_num
        if config.time_dynamic:
            # Dynamically adjust the time interval according to
            # the max size of the fetch batch
            if photo_counter_max > 1.2 * int(config.batch_size):
                time_interval_num = int(time_interval_num / 1.2)
            elif photo_counter_max < 0.8 * int(config.batch_size):
                time_interval_num = int(time_interval_num / 0.8)

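            # Clamp the interval to the configured
            # [time_interval_min, time_interval_max] range.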
            if time_interval_num > float(config.time_interval_max):
                time_interval_num = float(config.time_interval_max)
            if time_interval_num < float(config.time_interval_min):
                time_interval_num = float(config.time_interval_min)

        # If the photo collected more than enough, make it stop
        if db_size > int(config.max_size):
            log.info('Total collected photos: %d, stop at %s' %
                     (db_size, tb.unixtime_to_datearr(end_time)))
            break

    # Finish the rest of the things.
    db.close()
    db_trash.close()