Example #1
def read_file_list(fname, check=False):
    """
    Reads file names from a plain text file.

    Args:
        fname: string, path to a file with one file name per line.
        check: bool, check whether the files all exist.
    Returns:
        file_list: list, list of file names as strings.
    Raises:
        Exception: image file name does not exist.
    """
    with open(fname) as list_f:
        file_list = [line.strip() for line in list_f]
    if check:
        log.info('Checking all files exist')
        for f in progress_bar.get_iter(file_list):
            if not os.path.exists(f):
                log.fatal('File not found: {0}'.format(f))
    return file_list
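
A minimal usage sketch for the helper above; the file path is a placeholder, and log / progress_bar are project-specific modules assumed to be imported elsewhere:

# Hypothetical call; 'image_list.txt' stands in for a real list file.
image_files = read_file_list('image_list.txt', check=True)
log.info('Loaded {0} file names'.format(len(image_files)))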
Example #2
    height = 128
    width = 448
    feat_map_height = 16
    feat_map_width = 56
    img_channel = 3
    num_train_seq = 3
    num_seq = 5

    # read data
    train_video_seq = []
    valid_video_seq = []
    num_valid_seq = 0
    train_data_full = get_dataset(folder, 'train')

    with sh.ShardedFileReader(train_data_full) as reader:
        for idx_seq in pb.get_iter(xrange(num_seq)):
            seq_data = reader[idx_seq]
            if idx_seq < num_train_seq:
                train_video_seq.append(seq_data)
            else:
                if seq_data['gt_bbox'].shape[0] > 0:
                    valid_video_seq.append(seq_data)
                    num_valid_seq += 1

    # logger for saving intermediate output
    model_id = 'deep-tracker-003'
    logs_folder = args.logs
    logs_folder = os.path.join(logs_folder, model_id)

    iou_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'IOU_loss.csv'),
Example #3
def get_dataset(folder, split):
    """Get KITTI dataset.

    Args:
        folder: root directory.
        split: 'train', 'valid' or 'test'.

    Returns:
        dataset_file: ShardedFile object, use ShardedFileReader to read.
        See matching_data for example.

    """
    # 'train' => 'training', 'test' => 'testing'
    split_ing = split + 'ing'
    h5_fname = os.path.join(folder, split_ing, 'dataset-*')
    try:
        h5_f = sh.ShardedFile.from_pattern_read(h5_fname)
    except Exception:
        h5_f = None

    if h5_f:
        return h5_f

    left_folder = os.path.join(folder, split_ing, 'image_02')
    right_folder = os.path.join(folder, split_ing, 'image_03')
    if split == 'train':
        label_folder = os.path.join(folder, split_ing, 'label_02')
        seqs = range(13)
    elif split == 'valid':
        seqs = range(13, 21)
    elif split == 'test':
        seqs = range(100)

    # List the sequences
    seq_list = []
    for seq_num in os.listdir(left_folder):
        if seq_num.startswith('0') and int(seq_num) in seqs:
            seq_list.append(seq_num)
            pass
        pass

    # Prepare output file
    fname_out = os.path.join(folder, split_ing, 'dataset')
    f_out = sh.ShardedFile(fname_out, num_shards=len(seq_list))
    target_types = set(['Van', 'Car', 'Truck'])

    with sh.ShardedFileWriter(f_out, num_objects=len(seq_list)) as writer:
        for seq_num in pb.get_iter(seq_list):

            seq_data = {}
            frame_start = None
            frame_end = None
            num_frames = None  # Computed below; also needed for non-train splits.

            if split == 'train':
                label_fname = os.path.join(label_folder, seq_num + '.txt')
                obj_data = {}
                idx_map = []
                with open(label_fname) as label_f:
                    lines = label_f.readlines()
                    for ll in lines:
                        parts = ll.split(' ')
                        frame_no = int(parts[0])
                        ins_no = int(parts[1])
                        typ = parts[2]
                        truncated = int(parts[3])
                        occluded = int(parts[4])
                        bleft = float(parts[6])
                        btop = float(parts[7])
                        bright = float(parts[8])
                        bbot = float(parts[9])
                        if frame_start is None:
                            frame_start = frame_no
                            frame_end = frame_no
                        else:
                            frame_start = min(frame_start, frame_no)
                            frame_end = max(frame_end, frame_no)

                        raw_data = {
                            'frame_no': frame_no,
                            'ins_no': ins_no,
                            'typ': typ,
                            'truncated': truncated,
                            'occluded': occluded,
                            'bbox': (bleft, btop, bright, bbot)
                        }
                        if ins_no != -1 and typ in target_types:
                            if ins_no in obj_data:
                                obj_data[ins_no].append(raw_data)
                            else:
                                obj_data[ins_no] = [raw_data]

                num_ins = len(obj_data.keys())
                num_frames = frame_end - frame_start + 1
                bbox = np.zeros([num_ins, num_frames, 5], dtype='float32')
                idx_map = []

                for idx in obj_data.iterkeys():
                    new_idx = len(idx_map)
                    for dd in obj_data[idx]:
                        new_frame = dd['frame_no'] - frame_start
                        bbox[new_idx, new_frame, 4] = 1.0
                        bbox[new_idx, new_frame, 0: 4] = dd['bbox']
                    idx_map.append(idx)
                idx_map = np.array(idx_map, dtype='uint8')
                frame_map = np.arange(frame_start, frame_end + 1)

                seq_data['gt_bbox'] = bbox
                seq_data['idx_map'] = idx_map
                seq_data['frame_map'] = frame_map

            for camera, camera_folder in enumerate([left_folder, right_folder]):
                if not seq_num.startswith('0'):
                    continue
                # camera_folder already includes the split directory.
                seq_folder = os.path.join(camera_folder, seq_num)
                image_list = os.listdir(seq_folder)
                im_height = None
                im_width = None
                images = {}
                for ii, fname in enumerate(image_list):
                    img_fname = os.path.join(seq_folder, fname)
                    log.info(img_fname)
                    frame_no = int(fname[: 6])
                    img = cv2.imread(img_fname)
                    if frame_start is None:
                        frame_start = frame_no
                        frame_end = frame_no
                    else:
                        frame_start = min(frame_start, frame_no)
                        frame_end = max(frame_end, frame_no)
                    if im_height is None:
                        im_height = img.shape[0]
                        im_width = img.shape[1]
                    images[frame_no] = img

                if num_frames is None:
                    num_frames = frame_end - frame_start + 1
                final_images = np.zeros([num_frames, im_height, im_width, 3])
                for ii in images.iterkeys():
                    final_images[ii - frame_start] = images[ii]
                    
                seq_data['images_{}'.format(camera)] = final_images

            writer.write(seq_data)

    return f_out
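
A short usage sketch that mirrors how the other examples consume this function; sh is the project's sharded-file module, and the KITTI root path below is a placeholder:

# Hypothetical call; '/path/to/kitti' stands in for the real data root.
train_data = get_dataset('/path/to/kitti', 'train')
with sh.ShardedFileReader(train_data) as reader:
    num_seq = len(reader)
    seq_data = reader[0]
    # For the 'train' split each sequence carries 'gt_bbox', 'idx_map',
    # 'frame_map', plus 'images_0' / 'images_1' for the two cameras.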
Example #4
    valid_iou_iter = 500
    height = 128
    width = 448
    img_channel = 3
    num_train_seq = 16
    rnn_hidden_dim = 128

    # read data
    valid_video_seq = []
    num_valid_seq = 0
    train_data_full = get_dataset(folder, 'train')

    with sh.ShardedFileReader(train_data_full) as reader:
        num_seq = len(reader)

        for idx_seq, seq_data in enumerate(pb.get_iter(reader)):
            if idx_seq >= num_train_seq:
                if seq_data['gt_bbox'].shape[0] > 0:
                    valid_video_seq.append(seq_data)
                    num_valid_seq += 1

    # setting model
    opt_tracking = {}
    opt_tracking['rnn_seq_len'] = seq_length
    # opt_tracking['cnn_filter_size'] = [3, 3, 3, 3, 3, 3, 3, 3]
    # opt_tracking['cnn_num_filter'] = [16, 16, 32, 32, 64, 64, 96, 96]
    # opt_tracking['cnn_pool_size'] = [1, 2, 1, 2, 1, 2, 1, 2]

    opt_tracking['cnn_filter_size'] = [3, 3, 3, 3, 3, 3]
    opt_tracking['cnn_num_filter'] = [8, 8, 16, 16, 32, 32]
    opt_tracking['cnn_pool_size'] = [1, 2, 1, 2, 1, 2]
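
As a quick sanity check on the configuration above: the pooling schedule downsamples by a factor of 2 * 2 * 2 = 8 in each spatial dimension, so a 128 x 448 input maps to a 16 x 56 feature map, matching feat_map_height and feat_map_width in Example #2. A tiny arithmetic-only sketch:

height, width = 128, 448
cnn_pool_size = [1, 2, 1, 2, 1, 2]
total_pool = 1
for p in cnn_pool_size:
    total_pool *= p
feat_map_height = height // total_pool   # 128 // 8 = 16
feat_map_width = width // total_pool     # 448 // 8 = 56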
Example #5
    height = 128
    width = 448
    img_channel = 3
    resume_training = False
    num_train_seq = 16

    # read data
    train_video_seq = []
    valid_video_seq = []
    num_valid_seq = 0
    train_data_full = get_dataset(folder, 'train')
    
    with sh.ShardedFileReader(train_data_full) as reader:    
        num_seq = len(reader)
        
        for idx_seq, seq_data in enumerate(pb.get_iter(reader)):
            if idx_seq < num_train_seq:
                train_video_seq.append(seq_data)
            else:            
                if seq_data['gt_bbox'].shape[0] > 0:
                    valid_video_seq.append(seq_data)
                    num_valid_seq += 1
    
    # logger for saving intermediate output
    model_id = 'deep-tracker-002'
    logs_folder = '/u/rjliao/public_html/results'
    logs_folder = os.path.join(logs_folder, model_id)

    logp_logger_IOU = TimeSeriesLogger(
        os.path.join(logs_folder, 'IOU_loss.csv'),
        labels=['IOU loss'],
Example #6
    def get_dataset(self):
        """Get matching dataset. 

        Returns:
            dataset: dict
                images_0: [B, H, W, 3], first instance patches
                images_1: [B, H, W, 3], second instance patches
                label: [B], 1/0, whether they are the same instance. 
        """
        if self.dataset is not None:
            return self.dataset

        if self.h5_fname is not None:
            cache = data_utils.read_h5_data(self.h5_fname)
            if cache:
                return cache
        patch_height = self.opt['patch_height']
        patch_width = self.opt['patch_width']
        center_noise = self.opt['center_noise']
        padding_noise = self.opt['padding_noise']
        padding_mean = self.opt['padding_mean']
        num_ex_pos = self.opt['num_ex_pos']
        num_ex_neg = self.opt['num_ex_neg']
        shuffle = self.opt['shuffle']
        folder = self.folder
        split = self.split
        seqs = self.seqs
        usage = self.usage
        random = self.random

        dataset_pattern = os.path.join(folder, 'dataset-*')
        dataset_file = sh.ShardedFile.from_pattern_read(dataset_pattern)
        dataset_images = []
        dataset_labels = []

        if split is not None:
            if split == 'train':
                seqs = range(13)
                self.seqs = seqs
            elif split == 'valid':
                seqs = range(13, 21)
                self.seqs = seqs
            else:
                raise Exception('Unknown split: {}'.format(split))
            pass

        with sh.ShardedFileReader(dataset_file) as reader:
            for seq_num in pb.get_iter(seqs):
                seq_data = reader[seq_num]
                images = seq_data['images_0']
                gt_bbox = seq_data['gt_bbox']
                num_obj = gt_bbox.shape[0]
                num_frames = gt_bbox.shape[1]
                nneg = num_ex_neg * num_obj
                npos = num_ex_pos * num_obj

                if usage == 'match':
                    output_images = np.zeros(
                        [nneg + npos, 2, patch_height, patch_width, 3],
                        dtype='uint8')
                elif usage == 'detect' or usage == 'detect_multiscale':
                    output_images = np.zeros(
                        [nneg + npos, patch_height, patch_width, 3],
                        dtype='uint8')
                else:
                    raise Exception('Unknown usage: {}'.format(usage))

                output_labels = np.zeros([nneg + npos], dtype='uint8')
                dataset_images.append(output_images)
                dataset_labels.append(output_labels)

                if num_obj < 2:
                    continue

                if usage == 'match':
                    output_images[: nneg], output_labels[: nneg] = \
                        self.get_neg_pair(nneg, images, gt_bbox)

                    output_images[nneg:], output_labels[nneg:] = \
                        self.get_pos_pair(npos, images, gt_bbox)
                elif usage == 'detect':
                    output_images[: nneg], output_labels[: nneg] = \
                        self.get_neg_patch(nneg, images, gt_bbox)

                    output_images[nneg:], output_labels[nneg:] = \
                        self.get_pos_patch(npos, images, gt_bbox)
                elif usage == 'detect_multiscale':
                    output_images[: nneg], output_labels[: nneg] = \
                        self.get_neg_patch_multiscale(nneg, images, gt_bbox)

                    output_images[nneg:], output_labels[nneg:] = \
                        self.get_pos_patch_multiscale(npos, images, gt_bbox)
                pass
            pass

        dataset = self.assemble_dataset(dataset_images, dataset_labels)
        self.dataset = dataset

        if self.h5_fname is not None:
            data_utils.write_h5_data(self.h5_fname, dataset)

        return dataset
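
A hedged usage sketch for this method. The class name and constructor arguments below are hypothetical, and the numeric values are placeholders; only the option keys, the split names, and the returned dictionary layout come from the code and docstring above:

# Hypothetical class name and constructor; adjust to the actual dataset class.
opt = {
    'patch_height': 48,
    'patch_width': 48,
    'center_noise': 0.1,
    'padding_noise': 0.1,
    'padding_mean': 0.2,
    'num_ex_pos': 4,
    'num_ex_neg': 4,
    'shuffle': True,
}
matching_data = KittiMatchingDataset(
    folder='/path/to/kitti/training', opt=opt, split='train', usage='match')
dataset = matching_data.get_dataset()
# For usage == 'match', per the docstring:
#   dataset['images_0']: [B, H, W, 3] first instance patches
#   dataset['images_1']: [B, H, W, 3] second instance patches
#   dataset['label']:    [B] 1/0 flags, same instance or not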