Code Example #1
 def send_img_msg(self, target_addr, msg):
     if self.has_connection:
         self.uranus_sdk.send_img_msg_v2(target_addr, msg, self.ws_conn)
     else:
         logging.error('uranuspy send failed: no connection')
Code Example #2
 def broadcast_txt_msg(self, msg):
     if self.has_connection:
         # get all friends, then send msg one by one
         # all_friends = self.get_all_friends()
         page_num = 0
         is_last = False
         i = 0
         while not is_last:
             rp = requests.get(self.uranus_sdk.get_allusers_url +
                               '?token={}&page_num={}&per_page=15'.format(
                                   self.uranus_sdk.token, page_num))
             rp = rp.json()
             if rp['status'] == 'success':
                 if rp['data'] is not None:
                     for u in rp['data']:
                         i += 1
                         user = UranusUserCard()
                         user.load_from_dict(u)
                         # logging.info('broadcasting: {} {}'.format(user.user_addr, user.user_nick_name))
                         self.uranus_sdk.send_msg(user.user_addr, msg,
                                                  self.ws_conn)
                     page_num += 1
                 else:
                     is_last = True
             else:
                 # stop paging on failure, otherwise this loop never terminates
                 logging.error('failed to fetch users, status: {}'.format(rp['status']))
                 is_last = True
         logging.info('Broadcasting finished.')
         self.uranus_sdk.send_msg('usrZK8kZTzEHC',
                                  'Broadcast finished. The message was pushed to {} users.'.format(i),
                                  self.ws_conn)
     else:
         logging.error('broadcast_txt_msg send failed: no connection')
Code Example #3
def run(p):
    all_hwdb_gnt_files = glob.glob(os.path.join(p, '*.gnt'))
    logging.info('got all {} gnt files.'.format(len(all_hwdb_gnt_files)))
    logging.info('gathering charset...')
    charset = []
    if os.path.exists('characters.txt'):
        logging.info('found existing characters.txt...')
        # read in text mode so entries are str, matching labels decoded below
        with open('characters.txt', 'r', encoding='utf-8') as f:
            charset = [i.strip() for i in f.readlines()]
    else:
        if 'Gnt' in p:
            for gnt in all_hwdb_gnt_files:
                hwdb = CASIAHWDBGNT(gnt)
                for img, tagcode in hwdb.get_data_iter():
                    try:
                        label = struct.pack('>H', tagcode).decode('gbk')
                        label = label.replace('\x00', '')
                        charset.append(label)
                    except Exception as e:
                        continue
            charset = sorted(set(charset))
            with open('characters.txt', 'w') as f:
                f.writelines('\n'.join(charset))
    logging.info('got {} characters in total.'.format(len(charset)))
    logging.info('{}'.format(charset[:10]))

    tfrecord_f = os.path.basename(os.path.dirname(p)) + '.tfrecord'
    logging.info('tfrecord file saved into: {}'.format(tfrecord_f))
    i = 0
    with tf.io.TFRecordWriter(tfrecord_f) as tfrecord_writer:
        for gnt in all_hwdb_gnt_files:
            hwdb = CASIAHWDBGNT(gnt)
            for img, tagcode in hwdb.get_data_iter():
                try:
                    # why do you need resize?
                    # numpy shape is (rows, cols) = (height, width)
                    h = img.shape[0]
                    w = img.shape[1]
                    # img = cv2.resize(img, (64, 64))
                    label = struct.pack('>H', tagcode).decode('gbk')
                    label = label.replace('\x00', '')
                    index = charset.index(label)
                    # save img, label as example
                    example = tf.train.Example(features=tf.train.Features(
                        feature={
                            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
                            'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
                            'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[w])),
                            'height': tf.train.Feature(int64_list=tf.train.Int64List(value=[h])),
                        }))
                    tfrecord_writer.write(example.SerializeToString())
                    if i % 5000 == 0:
                        logging.info('processed {} examples. {}: {}'.format(i, label, index))
                    i += 1
                except Exception:
                    # logging.exception records the full traceback
                    logging.exception('failed to convert one example')
                    continue
    logging.info('done.')
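
The writer above stores raw pixel bytes plus width and height, so a reader has to reshape manually. Below is a minimal read-back sketch, assuming single-channel uint8 images; the tfrecord file name is hypothetical, but the feature keys must match the writer exactly.

import tensorflow as tf

# keys mirror the writer above: 'label', 'image', 'width', 'height'
feature_spec = {
    'label': tf.io.FixedLenFeature([], tf.int64),
    'image': tf.io.FixedLenFeature([], tf.string),
    'width': tf.io.FixedLenFeature([], tf.int64),
    'height': tf.io.FixedLenFeature([], tf.int64),
}

def parse_example(serialized):
    ex = tf.io.parse_single_example(serialized, feature_spec)
    # assumption: pixels were written as raw uint8 bytes
    img = tf.io.decode_raw(ex['image'], tf.uint8)
    img = tf.reshape(img, tf.stack([ex['height'], ex['width']]))
    return img, ex['label']

# 'GntTest.tfrecord' is a hypothetical output name from run()
ds = tf.data.TFRecordDataset('GntTest.tfrecord').map(parse_example)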
Code Example #4
 def send_voice_msg(self, target_addr, msg):
     if self.has_connection:
         raise NotImplementedError(
             'You need to call the Baidu Voice API to generate voice from the message.'
         )
     else:
         logging.error('uranuspy send failed: no connection')
Code Example #5
    def send_msg_by_user_acc(self, user_acc, msg):
        if self.has_connection:
            user = self.get_user_by_user_acc(user_acc)
            self.uranus_sdk.send_msg(target_addr=user.user_addr,
                                     content=msg,
                                     ws=self.ws_conn)

        else:
            logging.error('uranuspy send failed: no connection')
Code Example #6
def get_model():
    # init model
    model = build_net_003((64, 64, 1), num_classes)
    logging.info('model loaded.')

    latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(ckpt_path))
    if latest_ckpt:
        start_epoch = int(latest_ckpt.split('-')[1].split('.')[0])
        model.load_weights(latest_ckpt)
        logging.info('model resumed from: {} at epoch: {}'.format(latest_ckpt, start_epoch))
        return model
    else:
        logging.error('cannot find any checkpoint matching: {}'.format(ckpt_path))
Code Example #7
 def send_voice_to_subscribers(self, msg_bytes):
     if self.has_connection:
         if self.debug:
             logging.info('[uranuspy] now sending to subscribers...')
             logging.info('[uranuspy] current subscribers: {}'.format(self.subscribers_users))
         for item in self.subscribers_users:
             target_address = item['user_addr']
             self.uranus_sdk.send_voice_msg(target_addr=target_address,
                                            content=msg_bytes,
                                            ws=self.ws_conn)
     else:
         logging.error('uranuspy send failed: no connection')
Code Example #8
    def __init__(self, phase, size, base, extras, head, num_classes):
        super(RFBNet, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.size = size

        if size == 300:
            self.indicator = 3
        elif size == 512:
            self.indicator = 5
        else:
            # raising keeps a half-constructed module from being used silently
            raise ValueError("Error: Sorry, only sizes 300 and 512 are supported!")
        # vgg network
        self.base = nn.ModuleList(base)
        # conv_4
        self.Norm = BasicRFB_a(512, 512, stride=1, scale=1.0)
        self.extras = nn.ModuleList(extras)

        self.fpn = FeaturePyramidNetwork(C3_size=512, C4_size=1024)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        if self.phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
        # the following define extra_layers 0-6; only 0 (RFB module, in/out 1024),
        # 1 (10x10 RFBNet stride 2, 1024/512), 2 (5x5 RFBNet stride 2, 512/256),
        # 4 (3x3 conv10_2, 256/256) and 6 (1x1 conv11_2, 256/256) are used
        self.BasicRFB1 = BasicRFB_d(1024, 1024, scale=1.0, visual=2)
        self.BasicRFB2 = BasicRFB_d1(1024, 512, stride=2, scale=1.0, visual=2)
        self.BasicRFB3 = BasicRFB(512, 256, stride=2, scale=1.0, visual=1)

        # these extra conv layers are identical for both supported sizes (300 and 512)
        self.BasicConva1 = BasicConv(256, 128, kernel_size=1, stride=1)
        self.BasicConva2 = BasicConv(128, 256, kernel_size=3, stride=1)
        self.BasicConva3 = BasicConv(256, 128, kernel_size=1, stride=1)
        self.BasicConva4 = BasicConv(128, 256, kernel_size=3, stride=1)
Code Example #9
def stack_imgs(imgs_list, dim2d):
    """
    send a list of images
    then using dim2d to stack it

    for example:
        a.png
        b.png
        c.png
        d.png
    
    dim2d:
        2x2
    """
    a = int(dim2d.split('x')[0])
    b = int(dim2d.split('x')[1])
    if len(imgs_list) % a != 0 or len(imgs_list) % b != 0:
        logging.error('dim2d {} is not applicable for {} images.'.format(
            dim2d, len(imgs_list)))
        exit(1)
    elif len(imgs_list) != a * b:
        logging.error('number of images is not equal to a*b={}'.format(a * b))
        exit(1)
    else:
        imgs_list = [cv2.imread(i) for i in imgs_list]
        all_rows = []
        # a is the number of rows, b the number of columns (handles e.g. 2x1)
        for ri in range(a):
            one_row = []
            for ci in range(b):
                one_row.append(imgs_list[ri * b + ci])
            logging.info('stacking row: {}, with len: {}'.format(
                ri, len(one_row)))
            imgs = check_shape_resize_if_possible(one_row)
            img_a = np.hstack(imgs)
            all_rows.append(img_a)
        all_rows = check_shape_resize_if_possible(all_rows)
        final_img = np.vstack(all_rows)
        logging.info('final combined img shape: {}'.format(final_img.shape))
        cv2.imwrite('stacked_img.jpg', final_img)
        logging.info('done.')
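
For reference, a minimal invocation matching the docstring's 2x2 case; the file names are placeholders.

# stacks four images into a 2x2 mosaic; the function writes stacked_img.jpg itself
stack_imgs(['a.png', 'b.png', 'c.png', 'd.png'], '2x2')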
Code Example #10
File: core.py Project: mana-ai/friday
 def run_forever(self):
     if self.uranus_sdk.is_login:
         try:
             self.ws = websocket.create_connection(self.uranus_sdk.ws_url)
             self.uranus_op.set_ws_conn(self.ws)
             thread = threading.Thread(target=self.recv_ws)
             # thread.daemon = True
             thread.start()
             self.ws.send(self.uranus_sdk.hi())
             
             # online broadcast
             self.uranus_op.send_msg_to_subscribers("I'm online now, welcome to chat!")
             logging.info('[uranuspy] auto serving as {}'.format(self.user_acc))
         except Exception as e:
             self.ws.close()
             logging.error(e)
             logging.info('try re-login...')
     else:
         logging.info('[Uranus] not logged in yet, logging in now...')
         self.uranus_sdk.login(self.user_acc, self.user_password)
         self.run_forever()
Code Example #11
    def send_msg_to_subscribers(self, msg):
        """
        load subscribers and then send msg
        :param msg:
        :return:
        """
        if self.has_connection:
            if self.debug:
                logging.info('[uranuspy] now sending to subscribers...')
                logging.info('now subscribers: {}'.format(self.subscribers_users))
            for item in self.subscribers_users:
                target_address = item['user_addr']
                self.uranus_sdk.send_msg(target_addr=target_address,
                                         content=msg,
                                         ws=self.ws_conn)

        else:
            logging.error('uranuspy send failed: no connection')
Code Example #12
def load_txt_or_xml_format(t_f):
    if t_f.endswith('txt'):
        # open txt file lines to a list
        with open(t_f) as f:
            content = f.readlines()
        # remove whitespace characters like `\n` at the end of each line
        content = [x.strip() for x in content]
        return content
    elif t_f.endswith('xml'):
        root = ET.parse(t_f).getroot()
        all_gts = []
        for obj in root.findall('object'):
            obj_name = obj.find('name').text
            bndbox = obj.find('bndbox')
            left = bndbox.find('xmin').text
            top = bndbox.find('ymin').text
            right = bndbox.find('xmax').text
            bottom = bndbox.find('ymax').text
            all_gts.append(' '.join([obj_name, left, top, right, bottom]))
        return all_gts
    else:
        logging.error('unsupported gt file format.')
        exit(1)
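
Both branches return the same flat list of strings, one object per line, which is what makes the downstream eval code format-agnostic. A small usage sketch, with a hypothetical file name:

# txt input: each line is returned verbatim;
# xml input: every <object> is flattened to "name xmin ymin xmax ymax"
gts = load_txt_or_xml_format('000001.xml')
for gt in gts:
    print(gt)  # e.g. "dog 48 240 195 371"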
Code Example #13
File: core.py Project: mana-ai/friday
 def recv_ws(self):
     ws = self.ws
     while True:
         try:
             opcode, data = ws.recv_data()  # read frames from the websocket connection
             msg = None
             if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                 data = str(data, "utf-8")
             if opcode in _OPCODE_DATA:
                 msg = data
                 if 'test' not in msg:
                     try:
                         # the payload is JSON; parse it directly instead of eval()
                         msg_json = json.loads(msg)
                         purpose = msg_json['purpose']
                         if purpose == 'init':
                             logging.info('[uranuspy] initing...')
                             # we only need to get those unread msg from history messages
                             all_history_msgs = msg_json['payload']
                             logging.info('[uranuspy] got latest msgs: {}'.format(len(all_history_msgs)))
                             # renamed to m to avoid shadowing the outer msg
                             for m in all_history_msgs:
                                 if not m['read']:
                                     rtn = self.msgs_callback(m)
                                     if rtn:
                                         self.uranus_op.send_txt_msg(m['sender'], rtn)
                         else:
                             rtn = self.msgs_callback(msg_json['payload'])
                             if rtn:
                                 self.uranus_op.send_txt_msg(msg_json['payload']['sender'], rtn)
                     except Exception as e:
                         logging.error(e)
                         # logging.error(msg_json)
             else:
                 pass
         except Exception as e:
             logging.exception('Got an exception in the msg callback function: {}'.format(e))
             self.uranus_op.send_msg_by_user_acc('jintian', str(e))
Code Example #14
def main(_argv):
    if FLAGS.tiny:
        logging.info('using YoloV3 Tiny model.')
        model = YoloV3Tiny(FLAGS.size, training=True)
        anchors = yolo_tiny_anchors
        anchor_masks = yolo_tiny_anchor_masks
    else:
        logging.info('using YoloV3 model.')
        # model = YoloV3(FLAGS.size, training=True)
        model = YoloV3Model(FLAGS.size)
        anchors = yolo_anchors
        anchor_masks = yolo_anchor_masks

    train_dataset = dataset.load_fake_dataset()
    if FLAGS.dataset and os.path.exists(os.path.dirname(FLAGS.dataset)):
        logging.info(f'loading dataset from: {FLAGS.dataset}')
        train_dataset = dataset.load_tfrecord_dataset(FLAGS.dataset,
                                                      FLAGS.classes, normalize_box=True)
    else:
        logging.info(
            '{} cannot be found, did you change it to your machine path?'.format(FLAGS.dataset))
        exit(1)
    train_dataset = train_dataset.shuffle(buffer_size=128)  # TODO: not 1024
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    train_dataset = train_dataset.map(lambda x, y, w, h: (dataset.transform_images(
        x, FLAGS.size), dataset.transform_targets(y, anchors, anchor_masks, 80)
    ))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)
    logging.info('train dataset loaded.')
    val_dataset = dataset.load_fake_dataset()
    if FLAGS.val_dataset:
        val_dataset = dataset.load_tfrecord_dataset(FLAGS.val_dataset,
                                                    FLAGS.classes)
    val_dataset = val_dataset.batch(FLAGS.batch_size)
    val_dataset = val_dataset.map(lambda x, y: (dataset.transform_images(
        x, FLAGS.size), dataset.transform_targets(y, anchors, anchor_masks, 80)
    ))

    # transfer from yolov3 darknet, free head layers
    if FLAGS.transfer != 'none':
        if FLAGS.transfer == 'fine_tune':
            darknet = model.get_layer('yolo_darknet')
            freeze_all(darknet)
        elif FLAGS.transfer == 'frozen':
            freeze_all(model)
        else:
            # reset top layers
            if FLAGS.tiny:  # get initial weights
                init_model = YoloV3Tiny(FLAGS.size, training=True)
            else:
                # init_model = YoloV3(FLAGS.size, training=True)
                init_model = YoloV3Model(FLAGS.size)
            if FLAGS.transfer == 'darknet':
                for l in model.layers:
                    if l.name != 'yolo_darknet' and l.name.startswith('yolo_'):
                        l.set_weights(
                            init_model.get_layer(l.name).get_weights())
                    else:
                        freeze_all(l)
            elif FLAGS.transfer == 'no_output':
                for l in model.layers:
                    if l.name.startswith('yolo_output'):
                        l.set_weights(
                            init_model.get_layer(l.name).get_weights())
                    else:
                        freeze_all(l)
    start_epoch = 1
    if FLAGS.resume:
        latest_cp = tf.train.latest_checkpoint(os.path.dirname(FLAGS.weights))
        if latest_cp:
            start_epoch = int(latest_cp.split('-')[1].split('.')[0])
            model.load_weights(latest_cp)
            logging.info('model resumed from: {}, start at epoch: {}'.format(
                latest_cp, start_epoch))
        else:
            logging.info(
                'skipping resume since weights are not there; training from scratch')

    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
    loss = [YoloLoss(anchors[mask]) for mask in anchor_masks]

    if FLAGS.mode == 'eager_tf':
        logging.info(
            'running in tensorflow eager mode. note that loss can be somewhat hard to converge in this mode.')
        avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
        avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

        for epoch in range(start_epoch, FLAGS.epochs + 1):
            for batch, (images, labels) in enumerate(train_dataset):
                try:
                    # print(images.shape)
                    # print([i.shape for i in labels])
                    with tf.GradientTape() as tape:
                        outputs = model(images)
                        regularization_loss = tf.reduce_sum(model.losses)
                        pred_loss = []
                        for output, label, loss_fn in zip(outputs, labels, loss):
                            # print(output)
                            # print(label)
                            # print(loss_fn)
                            pred_loss.append(loss_fn(label, output))
                        # print(pred_loss)
                        total_loss = tf.reduce_sum(
                            pred_loss) + regularization_loss
                    grads = tape.gradient(
                        total_loss, model.trainable_variables)
                    optimizer.apply_gradients(
                        zip(grads, model.trainable_variables))
                    avg_loss.update_state(total_loss)

                    if batch % 40 == 0 and batch != 0:
                        logging.info("Epoch: {}, iter: {}, total_loss: {:.4f}, scale_losses: {}".format(
                            epoch, batch, total_loss.numpy(), list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                        test_img = images[0]
                        # test_model(model, test_img, epoch, batch)
                    if batch % 500 == 0 and batch != 0:
                        logging.info('save model periodically...')
                        model.save_weights(FLAGS.weights.format(epoch))
                        # inference to see the result

                except KeyboardInterrupt:
                    logging.info('interrupted. try saving model now...')
                    model.save_weights(FLAGS.weights.format(epoch))
                    logging.info('model has been saved.')
                    exit(0)
                except Exception as e:
                    logging.info('got an unexpected error: {}, continue...'.format(
                        e.with_traceback(0)))
                    continue
            if epoch % FLAGS.val_per_epoch == 0 and epoch != 0:
                for batch, (images, labels) in enumerate(val_dataset):
                    outputs = model(images)
                    regularization_loss = tf.reduce_sum(model.losses)
                    pred_loss = []
                    for output, label, loss_fn in zip(outputs, labels, loss):
                        pred_loss.append(loss_fn(label, output))
                    total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                    logging.info("{}_val_{}, {}, {}".format(
                        epoch, batch, total_loss.numpy(),
                        list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                    avg_val_loss.update_state(total_loss)
                logging.info("{}, train: {}, val: {}".format(
                    epoch,
                    avg_loss.result().numpy(),
                    avg_val_loss.result().numpy()))

                avg_loss.reset_states()
                avg_val_loss.reset_states()
        model.save_weights(FLAGS.weights.format(epoch))
        # save final model (both architecture and weights)
        model.save('yolov2_coco.h5')
        logging.info('training done.')
        exit(0)
    else:
        model.compile(optimizer=optimizer,
                      loss=loss,
                      run_eagerly=(FLAGS.mode == 'eager_fit'))
        callbacks = [
            ReduceLROnPlateau(verbose=1),
            EarlyStopping(patience=3, verbose=1),
            ModelCheckpoint('checkpoints/yolov3_weights.{epoch:02d}-{val_loss:.2f}.ckpt',
                            verbose=1,
                            save_weights_only=True,
                            save_best_only=False, period=1),
            TensorBoard(log_dir='logs')
        ]
        try:
            model.fit(train_dataset,
                      epochs=FLAGS.epochs,
                      callbacks=callbacks,
                      validation_data=val_dataset)
        except KeyboardInterrupt:
            logging.error('interrupted. try saving model now.')
            model.save_weights(FLAGS.weights.format(0))
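
main() reads everything from absl FLAGS. Here is a minimal flag setup inferred from the accesses above; the flag names match the code, but the defaults and help strings are assumptions, not the project's real values.

from absl import app, flags

# hypothetical defaults; only the flag names are taken from main() above
flags.DEFINE_boolean('tiny', False, 'use the YoloV3 Tiny model')
flags.DEFINE_integer('size', 416, 'input image size')
flags.DEFINE_string('dataset', '', 'path to the training tfrecord')
flags.DEFINE_string('val_dataset', '', 'path to the validation tfrecord')
flags.DEFINE_string('classes', '', 'path to the classes file')
flags.DEFINE_integer('batch_size', 8, 'batch size')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate')
flags.DEFINE_integer('epochs', 10, 'number of epochs')
flags.DEFINE_integer('val_per_epoch', 1, 'validate every N epochs')
flags.DEFINE_string('transfer', 'none', 'none/fine_tune/frozen/darknet/no_output')
flags.DEFINE_string('mode', 'fit', 'fit/eager_fit/eager_tf')
flags.DEFINE_string('weights', 'checkpoints/yolov3_{}.ckpt', 'weights path template')
flags.DEFINE_boolean('resume', False, 'resume from the latest checkpoint')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    app.run(main)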
Code Example #15
def load_data(dataset='coco', root_dir=''):
    """
    this function will support coco and VOC
    """
    if dataset == 'coco':
        for s in ['train', 'val']:
            for year in ['2014', '2017']:
                # each (set, year) pair is yielded example by example
                # data = []
                imgs_dir = os.path.join(root_dir, '{}{}'.format(s, year))
                anno_f = os.path.join(root_dir, 'annotations',
                                      'instances_{}{}.json'.format(s, year))
                if os.path.exists(imgs_dir) and os.path.exists(anno_f):
                    logging.info('solving COCO: {} {}'.format(s, year))
                    coco = COCO(anno_f)
                    # totally 82783 images
                    img_ids = coco.getImgIds()
                    # 90 category ids (not contiguous; actually only 80 classes)
                    cat_ids = coco.getCatIds()

                    for idx, img_id in enumerate(img_ids):
                        if idx % 500 == 0:
                            logging.info('Reading images: %d/%d' %
                                         (idx, len(img_ids)))
                        img_info = dict()
                        bboxes = []
                        labels = []

                        img_detail = coco.loadImgs(img_id)[0]
                        h = img_detail['height']
                        w = img_detail['width']

                        ann_ids = coco.getAnnIds(imgIds=img_id, catIds=cat_ids)
                        anns = coco.loadAnns(ann_ids)
                        for ann in anns:
                            bboxes_data = ann['bbox']
                            # normalize box
                            bboxes_data = [
                                bboxes_data[0] / float(w),
                                bboxes_data[1] / float(h),
                                bboxes_data[2] / float(w),
                                bboxes_data[3] / float(h),
                            ]
                            bboxes.append(bboxes_data)
                            # this category_id should be remap
                            labels.append(coco_remap_dict[int(
                                ann['category_id'])])
                        # read image data we need
                        img_path = os.path.join(imgs_dir,
                                                img_detail['file_name'])
                        with open(img_path, 'rb') as f:
                            img_bytes = f.read()

                        img_info['pixel_data'] = img_bytes
                        img_info['height'] = h
                        img_info['width'] = w
                        img_info['bboxes'] = bboxes
                        img_info['labels'] = labels
                        yield img_info
                        # data.append(img_info)
                    # yield data
                else:
                    logging.error('{} {} does not exist, skipping it.'.format(
                        s, year))
                    logging.error('{} and {} do not exist.'.format(
                        imgs_dir, anno_f))

    else:
        # TODO: adding VOC, KITTI, converting
        logging.error('{} not supported yet.'.format(dataset))
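
Since load_data is a generator, records stream one image at a time instead of loading the whole set into memory. A hedged usage sketch; root_dir is a placeholder assuming the standard COCO layout (train2017/, annotations/instances_train2017.json, ...):

for img_info in load_data('coco', root_dir='/data/coco'):
    # each record carries raw jpeg bytes plus normalized boxes and labels
    print(img_info['width'], img_info['height'], len(img_info['bboxes']))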
Code Example #16
def eval_voc(args):
    if args.ignore is None:
        args.ignore = []

    specific_iou_flagged = False
    if args.set_class_iou is not None:
        specific_iou_flagged = True

    GT_PATH = args.gt_dir
    DR_PATH = args.det_dir
    IMG_PATH = args.images_dir
    MINOVERLAP = args.min_overlap

    logging.info('Ground truth dir: {}'.format(GT_PATH))
    logging.info('Detection result dir: {}'.format(DR_PATH))
    logging.info('Images dir: {}'.format(IMG_PATH))
    logging.info('Min overlap: {}'.format(MINOVERLAP))

    if os.path.exists(IMG_PATH):
        for dirpath, dirnames, files in os.walk(IMG_PATH):
            if not files:
                # no image files found
                args.no_animation = True
    else:
        args.no_animation = True
    show_animation = False
    if not args.no_animation:
        try:
            import cv2
            show_animation = True
        except ImportError:
            print("\"opencv-python\" not found, please install to visualize the results.")
            args.no_animation = True
    draw_plot = False
    if not args.no_plot:
        try:
            import matplotlib.pyplot as plt
            draw_plot = True
        except ImportError:
            print(
                "\"matplotlib\" not found, please install it to get the resulting plots.")
            args.no_plot = True

    TEMP_FILES_PATH = os.path.join(os.path.dirname(GT_PATH), "temp_files")
    logging.info('creating a temp path: {}'.format(os.path.abspath(TEMP_FILES_PATH)))
    if not os.path.exists(TEMP_FILES_PATH):  # if it doesn't exist already
        os.makedirs(TEMP_FILES_PATH)
    results_files_path = "./results"
    if os.path.exists(results_files_path):  # if it exist already
        # reset the results directory
        shutil.rmtree(results_files_path)

    os.makedirs(results_files_path)
    if draw_plot:
        os.makedirs(os.path.join(results_files_path, "classes"))
    if show_animation:
        os.makedirs(os.path.join(results_files_path,
                                 "images", "detections_one_by_one"))

    # get a list with the ground-truth files
    # this can handle both txt and xml ground truth
    if os.path.isfile(GT_PATH):
        logging.info('{} is a file; COCO-format eval is not supported yet.'.format(GT_PATH))
        exit(0)
    else:
        all_files_gt = os.listdir(GT_PATH)
        ground_truth_files_list = []
        gt_format = 'txt'
        if all_files_gt[0].endswith('txt'):
            logging.info('detected txt format ground truth, starting eval...')
            ground_truth_files_list = glob.glob(os.path.join(GT_PATH, '*.txt'))
        elif all_files_gt[0].endswith('xml'):
            logging.info('detected xml format ground truth, starting eval...')
            ground_truth_files_list = glob.glob(os.path.join(GT_PATH, '*.xml'))
            gt_format = 'xml'
        else:
            logging.error('unsupported ground truth format, please use xml or txt.')
            exit(0)

    if len(ground_truth_files_list) == 0:
        logging.error("Error: No ground-truth files found!")
        exit(0)
    ground_truth_files_list.sort()
    # dictionary with counter per class
    gt_counter_per_class = {}
    counter_images_per_class = {}

    # note: ground truth can be txt or xml; both are converted to the same format
    for gt_file in ground_truth_files_list:
        file_id = os.path.basename(gt_file).split('.')[0]
        # check if there is a correspondent detection-results file
        temp_path = os.path.join(DR_PATH, (file_id + ".txt"))
        if not os.path.exists(temp_path):
            error_msg = "Error. File not found: {}\n".format(temp_path)
            error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
            logging.error(error_msg)
        lines_list = load_txt_or_xml_format(gt_file)
        # create ground-truth dictionary
        bounding_boxes = []
        is_difficult = False
        already_seen_classes = []
        for line in lines_list:
            try:
                if "difficult" in line:
                    class_name, left, top, right, bottom, _difficult = line.split()
                    is_difficult = True
                else:
                    class_name, left, top, right, bottom = line.split()
            except ValueError:
                error_msg = "Error: File " + gt_file + " in the wrong format.\n"
                error_msg += " Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
                error_msg += " Received: " + line
                error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
                error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
                logging.error(error_msg)
            # check if class is in the ignore list, if yes skip
            if class_name in args.ignore:
                continue
            bbox = left + " " + top + " " + right + " " + bottom
            if is_difficult:
                bounding_boxes.append(
                    {"class_name": class_name, "bbox": bbox, "used": False, "difficult": True})
                is_difficult = False
            else:
                bounding_boxes.append(
                    {"class_name": class_name, "bbox": bbox, "used": False})
                # count that object
                if class_name in gt_counter_per_class:
                    gt_counter_per_class[class_name] += 1
                else:
                    # if class didn't exist yet
                    gt_counter_per_class[class_name] = 1

                if class_name not in already_seen_classes:
                    if class_name in counter_images_per_class:
                        counter_images_per_class[class_name] += 1
                    else:
                        # if class didn't exist yet
                        counter_images_per_class[class_name] = 1
                    already_seen_classes.append(class_name)
        # dump bounding_boxes into a ".json" file
        with open(TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json", 'w') as outfile:
            json.dump(bounding_boxes, outfile)

    gt_classes = list(gt_counter_per_class.keys())
    logging.info('gt_classes gathered: {}'.format(gt_classes))
    # let's sort the classes alphabetically
    gt_classes = sorted(gt_classes)
    n_classes = len(gt_classes)
    # print(gt_classes)
    # print(gt_counter_per_class)

    if specific_iou_flagged:
        n_args = len(args.set_class_iou)
        error_msg = \
            '\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
        if n_args % 2 != 0:
            logging.error('Error, missing arguments. Flag usage:' + error_msg)
        # [class_1] [IoU_1] [class_2] [IoU_2]
        # specific_iou_classes = ['class_1', 'class_2']
        specific_iou_classes = args.set_class_iou[::2]  # even
        # iou_list = ['IoU_1', 'IoU_2']
        iou_list = args.set_class_iou[1::2]  # odd
        if len(specific_iou_classes) != len(iou_list):
            logging.error('Error, missing arguments. Flag usage:' + error_msg)
        for tmp_class in specific_iou_classes:
            if tmp_class not in gt_classes:
                logging.error('Error, unknown class \"' + tmp_class +
                      '\". Flag usage:' + error_msg)
        for num in iou_list:
            if not is_float_between_0_and_1(num):
                logging.error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)

    # get a list with the detection-results files
    dr_files_list = glob.glob(os.path.join(DR_PATH, '*.txt'))
    logging.info('detection files detected: {}, vs ground truth: {}'.format(len(dr_files_list), len(ground_truth_files_list)))
    dr_files_list.sort()

    for class_index, class_name in enumerate(gt_classes):
        bounding_boxes = []
        for txt_file in dr_files_list:
            # print(txt_file)
            # the first time it checks if all the corresponding ground-truth files exist
            file_id = os.path.basename(txt_file).split(".")[0]
            temp_path = ''
            if gt_format == 'txt':
                temp_path = os.path.join(GT_PATH, (file_id + ".txt"))
            elif gt_format == 'xml':
                temp_path = os.path.join(GT_PATH, (file_id + ".xml"))

            if class_index == 0:
                if not os.path.exists(temp_path):
                    error_msg = "Error. according ground truth File not found: {}\n".format(temp_path)
                    error_msg += "(You can avoid this error message by running extra/intersect-gt-and-dr.py)"
                    logging.error(error_msg)
            lines = load_txt_or_xml_format(txt_file)
            for line in lines:
                try:
                    tmp_class_name, confidence, left, top, right, bottom = line.split()
                except ValueError:
                    error_msg = "Error: File " + txt_file + " in the wrong format.\n"
                    error_msg += " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
                    error_msg += " Received: " + line
                    logging.error(error_msg)
                if tmp_class_name == class_name:
                    # print("match")
                    bbox = left + " " + top + " " + right + " " + bottom
                    bounding_boxes.append(
                        {"confidence": confidence, "file_id": file_id, "bbox": bbox})
                    # print(bounding_boxes)
        # sort detection-results by decreasing confidence
        bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True)
        with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json", 'w') as outfile:
            json.dump(bounding_boxes, outfile)
    logging.info('ground truth and det files solved, start calculating mAP...')
    sum_AP = 0.0
    ap_dictionary = {}
    lamr_dictionary = {}
    # open file to store the results
    with open(results_files_path + "/results.txt", 'w') as results_file:
        results_file.write("# AP and precision/recall per class\n")
        count_true_positives = {}
        for class_index, class_name in enumerate(gt_classes):
            count_true_positives[class_name] = 0
            """
            Load detection-results of that class
            """
            dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json"
            dr_data = json.load(open(dr_file))

            """
            Assign detection-results to ground-truth objects
            """
            nd = len(dr_data)
            tp = [0] * nd  # creates an array of zeros of size nd
            fp = [0] * nd
            for idx, detection in enumerate(dr_data):
                file_id = detection["file_id"]
                if show_animation:
                    # find ground truth image
                    ground_truth_img = glob.glob1(IMG_PATH, file_id + ".*")
                    #tifCounter = len(glob.glob1(myPath,"*.tif"))
                    if len(ground_truth_img) == 0:
                        logging.error("Error. Image not found with id: " + file_id)
                    elif len(ground_truth_img) > 1:
                        logging.error("Error. Multiple image with id: " + file_id)
                    else:  # found image
                        #print(IMG_PATH + "/" + ground_truth_img[0])
                        # Load image
                        img = cv2.imread(IMG_PATH + "/" + ground_truth_img[0])
                        # load image with draws of multiple detections
                        img_cumulative_path = results_files_path + \
                            "/images/" + ground_truth_img[0]
                        if os.path.isfile(img_cumulative_path):
                            img_cumulative = cv2.imread(img_cumulative_path)
                        else:
                            img_cumulative = img.copy()
                        # Add bottom border to image
                        bottom_border = 60
                        BLACK = [0, 0, 0]
                        img = cv2.copyMakeBorder(
                            img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
                # assign detection-results to ground truth object if any
                # open ground-truth with that file_id
                gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
                ground_truth_data = json.load(open(gt_file))
                ovmax = -1
                gt_match = -1
                # load detected object bounding-box
                bb = [float(x) for x in detection["bbox"].split()]
                for obj in ground_truth_data:
                    # look for a class_name match
                    if obj["class_name"] == class_name:
                        bbgt = [float(x) for x in obj["bbox"].split()]
                        bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]),
                              min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
                        iw = bi[2] - bi[0] + 1
                        ih = bi[3] - bi[1] + 1
                        if iw > 0 and ih > 0:
                            # compute overlap (IoU) = area of intersection / area of union
                            ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
                                                                              + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
                            ov = iw * ih / ua
                            if ov > ovmax:
                                ovmax = ov
                                gt_match = obj

                # assign detection as true positive/don't care/false positive
                if show_animation:
                    status = "NO MATCH FOUND!"  # status is only used in the animation
                # set minimum overlap
                min_overlap = MINOVERLAP
                if specific_iou_flagged:
                    if class_name in specific_iou_classes:
                        index = specific_iou_classes.index(class_name)
                        min_overlap = float(iou_list[index])
                if ovmax >= min_overlap:
                    if "difficult" not in gt_match:
                        if not bool(gt_match["used"]):
                            # true positive
                            tp[idx] = 1
                            gt_match["used"] = True
                            count_true_positives[class_name] += 1
                            # update the ".json" file
                            with open(gt_file, 'w') as f:
                                f.write(json.dumps(ground_truth_data))
                            if show_animation:
                                status = "MATCH!"
                        else:
                            # false positive (multiple detection)
                            fp[idx] = 1
                            if show_animation:
                                status = "REPEATED MATCH!"
                else:
                    # false positive
                    fp[idx] = 1
                    if ovmax > 0:
                        status = "INSUFFICIENT OVERLAP"

                """
                Draw image to show animation
                """
                if show_animation:
                    height, width = img.shape[:2]
                    # colors (OpenCV works with BGR)
                    white = (255, 255, 255)
                    light_blue = (255, 200, 100)
                    green = (0, 255, 0)
                    light_red = (30, 30, 255)
                    # 1st line
                    margin = 10
                    v_pos = int(height - margin - (bottom_border / 2.0))
                    text = "Image: " + ground_truth_img[0] + " "
                    img, line_width = draw_text_in_image(
                        img, text, (margin, v_pos), white, 0)
                    text = "Class [" + str(class_index) + "/" + \
                        str(n_classes) + "]: " + class_name + " "
                    img, line_width = draw_text_in_image(
                        img, text, (margin + line_width, v_pos), light_blue, line_width)
                    if ovmax != -1:
                        color = light_red
                        if status == "INSUFFICIENT OVERLAP":
                            text = "IoU: {0:.2f}% ".format(
                                ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
                        else:
                            text = "IoU: {0:.2f}% ".format(
                                ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
                            color = green
                        img, _ = draw_text_in_image(
                            img, text, (margin + line_width, v_pos), color, line_width)
                    # 2nd line
                    v_pos += int(bottom_border / 2.0)
                    rank_pos = str(idx+1)  # rank position (idx starts at 0)
                    text = "Detection #rank: " + rank_pos + \
                        " confidence: {0:.2f}% ".format(
                            float(detection["confidence"])*100)
                    img, line_width = draw_text_in_image(
                        img, text, (margin, v_pos), white, 0)
                    color = light_red
                    if status == "MATCH!":
                        color = green
                    text = "Result: " + status + " "
                    img, line_width = draw_text_in_image(
                        img, text, (margin + line_width, v_pos), color, line_width)

                    font = cv2.FONT_HERSHEY_SIMPLEX
                    if ovmax > 0:  # if there is intersections between the bounding-boxes
                        bbgt = [int(round(float(x)))
                                for x in gt_match["bbox"].split()]
                        cv2.rectangle(img, (bbgt[0], bbgt[1]),
                                      (bbgt[2], bbgt[3]), light_blue, 2)
                        cv2.rectangle(
                            img_cumulative, (bbgt[0], bbgt[1]), (bbgt[2], bbgt[3]), light_blue, 2)
                        cv2.putText(img_cumulative, class_name,
                                    (bbgt[0], bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
                    bb = [int(i) for i in bb]
                    cv2.rectangle(img, (bb[0], bb[1]),
                                  (bb[2], bb[3]), color, 2)
                    cv2.rectangle(img_cumulative,
                                  (bb[0], bb[1]), (bb[2], bb[3]), color, 2)
                    cv2.putText(img_cumulative, class_name,
                                (bb[0], bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
                    # show image
                    cv2.imshow("Animation", img)
                    cv2.waitKey(20)  # show for 20 ms
                    # save image to results
                    output_img_path = results_files_path + "/images/detections_one_by_one/" + \
                        class_name + "_detection" + str(idx) + ".jpg"
                    cv2.imwrite(output_img_path, img)
                    # save the image with all the objects drawn to it
                    cv2.imwrite(img_cumulative_path, img_cumulative)

            # print(tp)
            # compute precision/recall
            cumsum = 0
            for idx, val in enumerate(fp):
                fp[idx] += cumsum
                cumsum += val
            cumsum = 0
            for idx, val in enumerate(tp):
                tp[idx] += cumsum
                cumsum += val
            # print(tp)
            rec = tp[:]
            for idx, val in enumerate(tp):
                rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
            # print(rec)
            prec = tp[:]
            for idx, val in enumerate(tp):
                prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
            # print(prec)

            ap, mrec, mprec = voc_ap(rec[:], prec[:])
            sum_AP += ap
            # class_name + " AP = {0:.2f}%".format(ap*100)
            text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP "
            """
            Write to results.txt
            """
            rounded_prec = ['%.2f' % elem for elem in prec]
            rounded_rec = ['%.2f' % elem for elem in rec]
            results_file.write(text + "\n Precision: " + str(rounded_prec) +
                               "\n Recall :" + str(rounded_rec) + "\n\n")
            if not args.quiet:
                print(text)
            ap_dictionary[class_name] = ap

            n_images = counter_images_per_class[class_name]
            lamr, mr, fppi = log_average_miss_rate(
                np.array(rec), np.array(fp), n_images)
            lamr_dictionary[class_name] = lamr

            """
            Draw plot
            """
            if draw_plot:
                plt.plot(rec, prec, '-o')
                # add a new penultimate point to the list (mrec[-2], 0.0)
                # since the last line segment (and respective area) do not affect the AP value
                area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
                area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
                plt.fill_between(area_under_curve_x, 0,
                                 area_under_curve_y, alpha=0.2, edgecolor='r')
                # set window title
                fig = plt.gcf()  # gcf - get current figure
                fig.canvas.set_window_title('AP ' + class_name)
                # set plot title
                plt.title('class: ' + text)
                #plt.suptitle('This is a somewhat long figure title', fontsize=16)
                # set axis titles
                plt.xlabel('Recall')
                plt.ylabel('Precision')
                # optional - set axes
                axes = plt.gca()  # gca - get current axes
                axes.set_xlim([0.0, 1.0])
                axes.set_ylim([0.0, 1.05])  # .05 to give some extra space
                # Alternative option -> wait for button to be pressed
                # while not plt.waitforbuttonpress(): pass # wait for key display
                # Alternative option -> normal display
                # plt.show()
                # save the plot
                fig.savefig(results_files_path +
                            "/classes/" + class_name + ".png")
                plt.cla()  # clear axes for next plot

        if show_animation:
            cv2.destroyAllWindows()

        results_file.write("\n# mAP of all classes\n")
        mAP = sum_AP / n_classes
        text = "mAP = {0:.2f}%".format(mAP*100)
        results_file.write(text + "\n")
        print(text)

    # remove the temp_files directory
    shutil.rmtree(TEMP_FILES_PATH)

    det_counter_per_class = {}
    for txt_file in dr_files_list:
        # get lines to list
        lines_list = load_txt_or_xml_format(txt_file)
        for line in lines_list:
            class_name = line.split()[0]
            # check if class is in the ignore list, if yes skip
            if class_name in args.ignore:
                continue
            # count that object
            if class_name in det_counter_per_class:
                det_counter_per_class[class_name] += 1
            else:
                # if class didn't exist yet
                det_counter_per_class[class_name] = 1
    # print(det_counter_per_class)
    dr_classes = list(det_counter_per_class.keys())

    if draw_plot:
        window_title = "ground-truth-info"
        plot_title = "ground-truth\n"
        plot_title += "(" + str(len(ground_truth_files_list)) + \
            " files and " + str(n_classes) + " classes)"
        x_label = "Number of objects per class"
        output_path = results_files_path + "/ground-truth-info.png"
        to_show = False
        plot_color = 'forestgreen'
        draw_plot_func(
            gt_counter_per_class,
            n_classes,
            window_title,
            plot_title,
            x_label,
            output_path,
            to_show,
            plot_color,
            '',
        )

    with open(results_files_path + "/results.txt", 'a') as results_file:
        results_file.write("\n# Number of ground-truth objects per class\n")
        for class_name in sorted(gt_counter_per_class):
            results_file.write(class_name + ": " +
                               str(gt_counter_per_class[class_name]) + "\n")

    for class_name in dr_classes:
        # if class exists in detection-result but not in ground-truth then there are no true positives in that class
        if class_name not in gt_classes:
            count_true_positives[class_name] = 0
    # print(count_true_positives)

    # Saving results and plotting
    if draw_plot:
        window_title = "detection-results-info"
        # Plot title
        plot_title = "detection-results\n"
        plot_title += "(" + str(len(dr_files_list)) + " files and "
        count_non_zero_values_in_dictionary = sum(
            int(x) > 0 for x in list(det_counter_per_class.values()))
        plot_title += str(count_non_zero_values_in_dictionary) + \
            " detected classes)"
        # end Plot title
        x_label = "Number of objects per class"
        output_path = results_files_path + "/detection-results-info.png"
        to_show = False
        plot_color = 'forestgreen'
        true_p_bar = count_true_positives
        draw_plot_func(
            det_counter_per_class,
            len(det_counter_per_class),
            window_title,
            plot_title,
            x_label,
            output_path,
            to_show,
            plot_color,
            true_p_bar
        )

    with open(results_files_path + "/results.txt", 'a') as results_file:
        results_file.write("\n# Number of detected objects per class\n")
        for class_name in sorted(dr_classes):
            n_det = det_counter_per_class[class_name]
            text = class_name + ": " + str(n_det)
            text += " (tp:" + str(count_true_positives[class_name]) + ""
            text += ", fp:" + \
                str(n_det - count_true_positives[class_name]) + ")\n"
            results_file.write(text)

    if draw_plot:
        window_title = "lamr"
        plot_title = "log-average miss rate"
        x_label = "log-average miss rate"
        output_path = results_files_path + "/lamr.png"
        to_show = False
        plot_color = 'royalblue'
        draw_plot_func(
            lamr_dictionary,
            n_classes,
            window_title,
            plot_title,
            x_label,
            output_path,
            to_show,
            plot_color,
            ""
        )

    if draw_plot:
        window_title = "mAP"
        plot_title = "mAP = {0:.2f}%".format(mAP*100)
        x_label = "Average Precision"
        output_path = results_files_path + "/mAP.png"
        to_show = True
        plot_color = 'royalblue'
        draw_plot_func(
            ap_dictionary,
            n_classes,
            window_title,
            plot_title,
            x_label,
            output_path,
            to_show,
            plot_color,
            ""
        )
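
eval_voc depends on a voc_ap helper that is not shown here. For reference, below is a sketch of the standard all-points-interpolation AP computation used by the PASCAL VOC devkit; this is an assumption about voc_ap's behavior, not the project's actual implementation.

def voc_ap(rec, prec):
    # sketch of all-points interpolation; assumed, not the project's code
    mrec = [0.0] + rec + [1.0]
    mprec = [0.0] + prec + [0.0]
    # make precision monotonically non-increasing from the end
    for i in range(len(mprec) - 2, -1, -1):
        mprec[i] = max(mprec[i], mprec[i + 1])
    # sum rectangle areas wherever recall changes
    ap = 0.0
    for i in range(1, len(mrec)):
        if mrec[i] != mrec[i - 1]:
            ap += (mrec[i] - mrec[i - 1]) * mprec[i]
    return ap, mrec, mprec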
Code Example #17
File: imgshow.py Project: linkenghong/HWR
def imgshow(char_index):
    train_dir = r'./data/train'
    path = os.path.join(train_dir, char_index)
    img_path_list = glob.glob(os.path.join(path, '*.png'))

    transform = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
    ])
    count = 1
    plt.figure()

    for p in img_path_list:
        if count > 4:
            break
        plt.subplot(2,2,count)
        img = Image.open(p).convert('RGB')  # renamed to avoid shadowing the input builtin
        img = transform(img)
        plt.imshow(img.permute(1, 2, 0))
        count += 1

    plt.show()

if __name__ == '__main__':
    if len(sys.argv) <= 1:
        logging.error('send a pattern like this: {}'.format('06203'))
    else:
        p = sys.argv[1]
        logging.info('show img from: {}'.format(p))
        imgshow(p)