Code Example #1
 def __build_sample_list(self, root, annot_files, dataset_name):
     image_root = root + '/JPEGImages/'
     samples = []
     for fn in tqdm(annot_files, desc=dataset_name, unit='samples'):
         with open(fn, 'r') as f:
             doc = lxml.etree.parse(f)
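             # Resolve the image path from the <filename> tag of the annotation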
             filename = image_root + doc.xpath(
                 '/annotation/filename')[0].text
             if not os.path.exists(filename):
                 continue
             img = cv2.imread(filename)
             imgsize = Size(img.shape[1], img.shape[0])
             boxes = []
             objects = doc.xpath('/annotation/object')
             for obj in objects:
                 label = obj.xpath('name')[0].text
                 if label == myObject:  # keep only the target class; myObject is assumed to be defined elsewhere
                     xmin = int(float(obj.xpath('bndbox/xmin')[0].text))
                     xmax = int(float(obj.xpath('bndbox/xmax')[0].text))
                     ymin = int(float(obj.xpath('bndbox/ymin')[0].text))
                     ymax = int(float(obj.xpath('bndbox/ymax')[0].text))
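                     # abs2prop converts the absolute pixel bounds to proportional center/size terms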
                     center, size = abs2prop(xmin, xmax, ymin, ymax,
                                             imgsize)
                     box = Box(label, self.lname2id[label], center, size)
                     boxes.append(box)
             if not boxes:
                 continue
             sample = Sample(filename, boxes, imgsize)
             samples.append(sample)
     return samples
Code Example #2
def transform_gt(gt, new_size, h_off, w_off):
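    # transform_box re-maps each box into the new image geometry and returns
    # None when a box is dropped; such boxes are skipped below.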
    boxes = []
    for box in gt.boxes:
        box = transform_box(box, gt.imgsize, new_size, h_off, w_off)
        if box is None:
            continue
        boxes.append(box)
    return Sample(gt.filename, boxes, new_size)
Code Example #3
def read_from_list(file, data_path):
    ids = []
    with open(file, 'r') as phase_input:
        lines = phase_input.readlines()
        for line in lines:
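            # each line is expected to look like '<split>/<image id>'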
            splt = line.split('/')[1]
            ids.append(splt)
    train_data = osp.join(data_path, 'train')
    image_folder = osp.join(train_data, 'images')
    mask_folder = osp.join(train_data, 'masks')

    samples = {}
    for id in ids:
        id = id[:-1]  # drop the trailing newline kept by readlines()
        samples[id] = Sample()
        samples[id].image_fp = osp.join(image_folder, id + '.png')
        samples[id].mask_fp = osp.join(mask_folder, id + '.png')
    return samples
Code Example #4
 def __call__(self, data, label, gt):
     flip = data.transpose(Image.FLIP_LEFT_RIGHT)
     boxes = []
     for box in gt.boxes:
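         # Box centers are proportional (0-1), so a horizontal flip maps x to 1 - x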
         center = Point(1 - box.center.x, box.center.y)
         box = Box(box.label, box.labelid, center, box.size)
         boxes.append(box)
     gt = Sample(gt.filename, boxes, gt.imgsize)
     return flip, label, gt
Code Example #5
    def __call__(self, data, label, gt):
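        # flipCode=1 mirrors the image around the vertical axis (horizontal flip)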
        data = cv2.flip(data, 1)
        boxes = []
        for box in gt.boxes:
            center = Point(1 - box.center.x, box.center.y)
            box = Box(box.label, box.labelid, center, box.size)
            boxes.append(box)
        gt = Sample(gt.filename, boxes, gt.imgsize)

        return data, label, gt
Code Example #6
    def __build_sample_list(self, root, annot_files):
        """
        Build a list of samples for the VOC dataset (either trainval or test)
        """
        image_root = os.path.join(root, 'rgb-images')
        samples = []
        #-----------------------------------------------------------------------
        # Process each annotated sample
        #-----------------------------------------------------------------------
        for fn in tqdm(annot_files, desc='ucf_24_frame', unit='samples'):
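            # The annotation path is assumed to look like .../<activity>/<video>/<frame_id>.txt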
            act = fn.split('/')[4]
            video = fn.split('/')[5]
            frame_id = fn.split('/')[-1][:-4]
            image_path = os.path.join(image_root, act, video,
                                      '{}.jpg'.format(frame_id))

            #---------------------------------------------------------------
            # Get the file dimensions
            #---------------------------------------------------------------
            if not os.path.exists(image_path):
                continue

            img = cv2.imread(image_path)
            imgsize = Size(img.shape[1], img.shape[0])

            #---------------------------------------------------------------
            # Get boxes for all the objects
            #---------------------------------------------------------------
            boxes = []
            with open(fn, 'r') as fin:
                objects = fin.readlines()
            for line in objects:
                line = line[:-1]
                #-----------------------------------------------------------
                # Get the properties of the box and convert them to the
                # proportional terms
                #-----------------------------------------------------------
                obj = line.split(' ')
                label = int(obj[0]) - 1
                xmin = int(float(obj[1]))
                ymin = int(float(obj[2]))
                xmax = int(float(obj[3]))
                ymax = int(float(obj[4]))

                center, size = abs2prop(xmin, xmax, ymin, ymax, imgsize)
                box = Box(self.lid2name[label], label, center, size)
                boxes.append(box)
            if not boxes:
                continue
            sample = Sample(image_path, boxes, imgsize)
            samples.append(sample)

        return samples
Code Example #7
    def _build_sample_list(self, root, annot_files):
        image_root = root + 'VOC_Jpeg/'
        image_seg_root = root + 'VOC_Segmentation/'
        samples = []

        for fn in tqdm(annot_files, unit='samples'):
            with open(fn, 'r') as f:
                doc = lxml.etree.parse(f)
                filename = image_root + doc.xpath(
                    '/annotation/filename')[0].text
                # The segmentation ground truth shares the annotation's file name,
                # with the .jpg extension swapped for .png
                seg_gt = image_seg_root + doc.xpath(
                    '/annotation/filename')[0].text
                seg_gt = seg_gt.replace('jpg', 'png')
                seg_gt_to_compare = seg_gt

                #---------------------------------------------------------------
                # Get the file dimensions
                #---------------------------------------------------------------
                if not os.path.exists(filename):
                    continue

                img = cv2.imread(filename)
                img_seg_gt = cv2.imread(seg_gt)
                imgsize = Size(img.shape[1], img.shape[0])

                #---------------------------------------------------------------
                # Get boxes for all the objects
                #---------------------------------------------------------------
                boxes = []
                objects = doc.xpath('/annotation/object')
                for obj in objects:
                    #-----------------------------------------------------------
                    # Get the properties of the box and convert them to the
                    # proportional terms
                    #-----------------------------------------------------------
                    label = obj.xpath('name')[0].text
                    xmin = int(float(obj.xpath('bndbox/xmin')[0].text))
                    xmax = int(float(obj.xpath('bndbox/xmax')[0].text))
                    ymin = int(float(obj.xpath('bndbox/ymin')[0].text))
                    ymax = int(float(obj.xpath('bndbox/ymax')[0].text))
                    center, size = abs2prop(xmin, xmax, ymin, ymax, imgsize)
                    box = Box(label, self.lname2id[label], center, size)
                    boxes.append(box)
                if not boxes:
                    continue
                sample = Sample(filename, boxes, imgsize, seg_gt,
                                seg_gt_to_compare)
                samples.append(sample)

        return samples
Code Example #8
def get_all_samples(root_path, has_target=True):
    samples = {}

    image_folder = osp.join(root_path, 'images')
    image_files = glob.glob(osp.join(image_folder, '*.png'))

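    # One Sample per image, keyed by the id returned by get_file_id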
    for image_file in image_files:
        sample_id = get_file_id(image_file)
        samples[sample_id] = Sample()
        samples[sample_id].image_fp = image_file

    if has_target:
        mask_folder = osp.join(root_path, 'masks')
        mask_files = glob.glob(osp.join(mask_folder, '*.png'))

        for mask_file in mask_files:
            sample_id = get_file_id(mask_file)
            assert sample_id in samples, 'get all samples, wrong mask id'

            samples[sample_id].mask_fp = mask_file

    return samples
Code Example #9
    def __build_sample_list(self, root, dataset_name):
        """
        Build a list of samples for the VOC dataset (either trainval or test)
        """
        image_root = root + '/JPEGImages/'
        annot_root = root + '/Annotations/'
        annot_files = glob(annot_root + '/*xml')
        samples = []

        #-----------------------------------------------------------------------
        # Process each annotated sample
        #-----------------------------------------------------------------------
        for fn in tqdm(annot_files, desc=dataset_name, unit='samples'):
            with open(fn, 'r') as f:
                doc = lxml.etree.parse(f)
                filename = image_root + doc.xpath(
                    '/annotation/filename')[0].text

                #---------------------------------------------------------------
                # Get the file dimensions
                #---------------------------------------------------------------
                if not os.path.exists(filename):
                    continue

                img = cv2.imread(filename)
                imgsize = Size(img.shape[1], img.shape[0])

                #---------------------------------------------------------------
                # Get boxes for all the objects
                #---------------------------------------------------------------
                boxes = []
                objects = doc.xpath('/annotation/object')
                for obj in objects:
                    #-----------------------------------------------------------
                    # Get the properties of the box and convert them to the
                    # proportional terms
                    #-----------------------------------------------------------
                    label = obj.xpath('name')[0].text
                    xmin = int(float(obj.xpath('bndbox/xmin')[0].text))
                    xmax = int(float(obj.xpath('bndbox/xmax')[0].text))
                    ymin = int(float(obj.xpath('bndbox/ymin')[0].text))
                    ymax = int(float(obj.xpath('bndbox/ymax')[0].text))
                    center, size = abs2prop(xmin, xmax, ymin, ymax, imgsize)
                    box = Box(label, self.lname2id[label], center, size)
                    boxes.append(box)
                if not boxes:
                    continue
                sample = Sample(filename, boxes, imgsize)
                samples.append(sample)

        return samples
Code Example #10
    def __call__(self, fname):
        with Image.open(fname) as img:
            img = img.convert('RGB')
        img_size = Size(img.size[0], img.size[1])

        if self.dataset == 'VOC':
            # fname: e.g. '~/VOC/JPEGImages/002222.jpg'; the matching annotation path is derived by slicing fname below
            with open(
                    os.path.join(fname[:-21], 'Annotations',
                                 fname[-10:-4] + '.xml'), 'r') as f:
                doc = lxml.etree.parse(f)
                boxes = []
                objects = doc.xpath('/annotation/object')

                for obj in objects:
                    label = obj.xpath('name')[0].text
                    xmin = float(obj.xpath('bndbox/xmin')[0].text)
                    xmax = float(obj.xpath('bndbox/xmax')[0].text)
                    ymin = float(obj.xpath('bndbox/ymin')[0].text)
                    ymax = float(obj.xpath('bndbox/ymax')[0].text)

                    labels = self.label2idx[label]

                    # Convert the absolute pixel bounds to proportional (0-1) center/size terms
                    center, size = abs2prop(xmin, xmax, ymin, ymax,
                                            img_size)
                    box = Box(label, labels, center, size)  # Box(label name, label id, center, size)
                    boxes.append(box)

        elif self.dataset == 'KITTI':
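            # KITTI labels live in <root>/label_2/<frame_id>.txt, one object per whitespace-separated line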
            with open(
                    os.path.join(fname[:-18], 'label_2',
                                 fname[-10:-4] + '.txt'), 'r') as fp:
                objs = [line.split(' ') for line in fp.readlines()]
            boxes = []
            for obj in objs:
                if not obj[0] == 'DontCare':
                    xmin = float(obj[4])
                    ymin = float(obj[5])
                    xmax = float(obj[6])
                    ymax = float(obj[7])
                    label = self.label2idx[obj[0]]
                    center, size = abs2prop(
                        xmin, xmax, ymin, ymax, img_size
                    )  # Convert the absolute min-max box bound to proportional center-width bounds
                    # (/300 , /1300) -> (0~1, 0~1)
                    box = Box(obj[0], label, center, size)
                    boxes.append(box)

        sample = Sample(fname, boxes, img_size)
        return img, labels, sample
Code Example #11
def predict():
    # get data
    input_path = "input.jpg"
    input_pre_path = "input_pre.jpg"
    try:
        # check if the post request has the file part
        filestr = request.files['image'].read()
        # convert string data to numpy array
        npimg = np.frombuffer(filestr, np.uint8)  # np.fromstring is deprecated for binary input
        # convert numpy array to image
        img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
        img = detect(img)
        cv2.imwrite(input_path, img)
        process_image_for_ocr(input_path, input_pre_path)
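        # Wrap the preprocessed image in a Sample with an empty ground-truth string so the generator can batch it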
        data = [Sample("", input_pre_path)]
        test_set = TextSequenceGenerator(data)
        samples = test_set[0]
        img = samples[0]['the_input'][0]

        # img = image.load_img(new_file_path, target_size=ArchitectureConfig.IMG_SIZE[::-1], interpolation='bicubic')
        # img = image.img_to_array(img)
        # img = preprocess_input(img)
        # img = img.transpose((1, 0, 2))
        img = np.expand_dims(img, axis=0)
        img = img.transpose((0, 1, 2, 3))

        net_out_value = model.predict(img)
        pred_texts = decode_predict_ctc(net_out_value, ArchitectureConfig.CHARS)
        output = {'results': pred_texts[0]}


    except Exception as err:
        print(err)
        output = {'results': "Error!"}


    # convert data into dataframe
    #data.update((x, [y]) for x, y in data.items())
    #data_df = pd.DataFrame.from_dict(data)

    # predictions
    #result = model.predict(data_df)

    # send back to browser
    #output = {'results': 'OK'}

    # return data
    #return jsonify(results=output)
    print(output)
    return json.dumps(output, ensure_ascii=False).encode('utf8')
Code Example #12
 def __build_sample_list(self, root,image_dir,info_dir):
     """
     Build a list of samples from a face annotation list file (a filename line, an object-count line, then one box line per object)
     """
     samples = []
     with open(root + info_dir, 'r') as f:
         contents = f.read()
     lines = contents.split('\n')
     print(len(lines))
     #-----------------------------------------------------------------------
     # Process each annotated sample
     #-----------------------------------------------------------------------
     i = 0
     while i < (len(lines) - 1) / 10:
         #---------------------------------------------------------------
         # Get the file dimensions
         #---------------------------------------------------------------
         filename = root + image_dir + lines[i]
         img = cv2.imread(filename)
         imgsize = Size(img.shape[1], img.shape[0])
         #---------------------------------------------------------------
         # Get boxes for all the objects
         #---------------------------------------------------------------
         boxes = []
         i += 1
         num_objects = int(lines[i])
         if not num_objects:
             i += 2
             continue
         i += 1
         for obj in range(num_objects):
             #-----------------------------------------------------------
             # Get the properties of the box and convert them to the
             # proportional terms
             #-----------------------------------------------------------
             label = 'face'
             parts = lines[i + obj].split()
             xmin = int(parts[0]) - int(parts[2]) / 2
             xmax = int(parts[0]) + int(parts[2]) / 2
             ymin = int(parts[1]) - int(parts[3]) / 2
             ymax = int(parts[1]) + int(parts[3]) / 2
             center, size = abs2prop(xmin, xmax, ymin, ymax, imgsize)
             box = Box(label, self.lname2id[label], center, size)
             boxes.append(box)
         i += num_objects
         sample = Sample(filename, boxes, imgsize)
         samples.append(sample)
     return samples
Code Example #13
File: main.py Project: feshfesh123/HTR_tf2
def predict(img_path):
    model = CRNN_model(CRNN_MODE.inference)
    model.load_weights(FilePaths.fnSave)
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    data = [Sample("", img_path)]
    test_set = TextSequenceGenerator(data)

    samples = test_set[0]
    img = samples[0]['the_input'][0]

    chars_ = ArchitectureConfig.CHARS

    # plt.imshow(np.squeeze(img).T)
    img = np.expand_dims(img, axis=0)
    print(img.shape)
    net_out_value = model.predict(img)
    print(net_out_value.shape)
    pred_texts = decode_predict_ctc(net_out_value, chars_)
    print(pred_texts)
Code Example #14
def retrieve_peaks(peak_file, peak_kwd, group_name, seq_dict):
    peaks = pybedtools.BedTool(peak_file)
    num_files = len(seq_dict[peak_kwd][group_name].keys())
    print_time("{} batches to process".format(num_files), start_time)

    for file in seq_dict[peak_kwd][group_name].keys():
        if __check_exist(file):
            print_time("{} exists -- skip".format(file), start_time)
            continue

        # Initialize output signal.
        sample_index, start, stop = __file_attribute(
            seq_dict[peak_kwd][group_name][file])
        signal = np.empty((stop - start, args.sequence_length + 3))
        signal[:] = np.nan  # the np.NaN alias was removed in NumPy 2.0
        signal[:, -3:-1] = sample_index
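        # The two columns before last hold sample_index; the last column holds the sequence index k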

        # Find peaks that overlap with each sample sequence.
        for k in tqdm(range(start, stop)):
            ks = k - start
            signal[ks, -1] = k
            signal[ks, :args.sequence_length] = 0
            sample = Sample(seq_dict["input"][k])
            entry = "{} {} {}".format(sample.chrom, sample.start, sample.stop)
            a = pybedtools.BedTool(entry, from_string=True)
            apeaks = a.intersect(peaks)
            for p in apeaks:
                s = p.start - sample.start
                t = p.stop - sample.start
                signal[ks, s:t] = 1
            if (k + 1) % 1000 == 0:
                pybedtools.cleanup(remove_all=True)

        # Save batch data file to disk.
        np.savez_compressed(
            file,
            group_name=group_name,
            peak_type=peak_kwd,
            start=start,
            stop=stop,
            data=signal,
        )
        print_time("{} targets saved in {}".format(peak_kwd, file), start_time)
Code Example #15
def retrieve_signal(
    peak_file, bigWig_file, seq_dict, tmp_file, group_name, assay_type
):
    peaks = pybedtools.BedTool(peak_file)
    num_samples = len(seq_dict["input"])
    writer = FeatureWriter(
        args.batch_size,
        args.sequence_length,
        num_samples,
        group_name,
        assay_type,
    )

    for k in tqdm(seq_dict["input"].keys()):
        # Initialize signal track.
        signal = np.zeros(args.sequence_length)

        # Construct BedTool input from sample sequence location.
        sample = Sample(seq_dict["input"][k])
        entry = "{} {} {}".format(sample.chrom, sample.start, sample.stop)
        a = pybedtools.BedTool(entry, from_string=True)

        # Retrieve sample bigwig signal that fall within peak regions.
        apeaks = a.intersect(peaks)
        for p in apeaks:
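            # Dump the bigWig signal inside this overlapping peak region to a temporary bedGraph file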
            cmd = "bigWigToBedGraph -chrom={} -start={} -end={} {} {}".format(
                sample.chrom, p.start, p.stop, bigWig_file, tmp_file
            )
            check_call(cmd, shell=True)
            with open(tmp_file, "rb") as wigs:
                for line in wigs:
                    record = line.strip().decode("utf-8").split("\t")
                    s = int(record[1]) - sample.start
                    t = int(record[2]) - sample.start
                    signal[s:t] = float(record[3])
        # Write signal track to disk.
        writer.write_feature(
            signal, k, seq_dict["input"][k][assay_type][group_name]
        )
        # Clean up tmp files generated by pybedtools.
        if (k + 1) % 1000 == 0:
            pybedtools.cleanup(remove_all=True)
    return writer
Code Example #16
File: profiler.py Project: zjuter06060126/pai
def get_sample_data(cpu_file, mem_file, blk_file, net_file, gpu_id, period):
    [mem_used, mem_total] = get_memory_percent(mem_file)

    # 1st info about I/O, network and CPU
    # read_bytes1 = get_disk_read_bytes(blk_file)
    # write_bytes1 = get_disk_write_bytes(blk_file)
    [read_bytes1, write_bytes1] = get_disk_bytes(blk_file)
    [network_receive1, network_transmit1] = get_network_bytes(net_file)
    [sys_ticks1, container_ticks1] = get_cpu_ticks(cpu_file)
    time.sleep(period)
    # 2nd info about I/O, network and CPU, calculate how many bytes used in this period
    # read_bytes2 = get_disk_read_bytes(blk_file)
    # write_bytes2 = get_disk_write_bytes(blk_file)
    [read_bytes2, write_bytes2] = get_disk_bytes(blk_file)
    [network_receive2, network_transmit2] = get_network_bytes(net_file)
    [sys_ticks2, container_ticks2] = get_cpu_ticks(cpu_file)

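    # CPU usage (%) = container tick delta / system tick delta, scaled by the number of online CPUs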
    online_cpus = os.sysconf(os.sysconf_names['SC_NPROCESSORS_ONLN'])
    cpu_usage = (container_ticks2 - container_ticks1) * 1.0 / (
        sys_ticks2 - sys_ticks1) * online_cpus * 100

    # get the usage of the GPU to analyze
    gpu_usage = list()
    gpu_mem = list()
    gpu_mem_used = list()
    gpu_mem_total = list()
    for gid in gpu_id:
        gpu_usage.append(get_gpu_utilization(gid).gpu)
        gpu_mem.append(get_gpu_utilization(gid).memory)
        gpu_mem_used.append(get_gpu_memory(gid).used / Byte_GiByte)
        gpu_mem_total.append(get_gpu_memory(gid).total / Byte_GiByte)
    sample_data = Sample(
        cpu_usage, mem_used / Byte_GiByte, mem_total / Byte_GiByte,
        (read_bytes2 - read_bytes1) / period / Byte_KiByte,
        (write_bytes2 - write_bytes1) / period / Byte_KiByte,
        (network_receive2 - network_receive1) / period / Byte_KiByte,
        (network_transmit2 - network_transmit1) / period / Byte_KiByte,
        gpu_usage, gpu_mem, gpu_mem_used, gpu_mem_total)
    return sample_data