Example #1
def main():
    print("[INFO] loading model...")
    model = ESNet(in_ch=3, n_class=1).to(DEVICE)
    load_checkpoint(torch.load(PRETRAINED_MODEL_PATH), model)
    model.eval()

    for image_name in os.listdir(DATASET_PATH):
        if image_name.endswith(".png"):
            print("[INFO] processing: " + image_name)
            full_path = os.path.join(DATASET_PATH, image_name)
            testing_image = cv2.imread(full_path)
            t_img = preprocess(testing_image, INPUT_SIZE)
            start = time.time()
            with torch.no_grad():
                preds = torch.sigmoid(model(t_img))
                preds = (preds > 0.5)
                end = time.time()
                run_time = end - start
            print("[INFO] inference took {:.4f} seconds".format(run_time))

            output, final_pred_mask = postprocess(testing_image, preds,
                                                  INPUT_SIZE)
            output_name = "output_" + image_name
            results_path = os.path.join(RESULTS_DIR, output_name)
            print("mask size: ", final_pred_mask.shape)

            cv2.imwrite(results_path, output)
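Here `preprocess` and `postprocess` are project helpers not shown in this example. A minimal sketch of what they might look like for this binary-segmentation pipeline; the resize and normalization conventions are assumptions, not the original implementations:

def preprocess(image_bgr, input_size):
    # Resize, scale to [0, 1], and convert HWC uint8 -> NCHW float tensor (assumed).
    resized = cv2.resize(image_bgr, input_size)
    tensor = torch.from_numpy(resized.astype("float32") / 255.0)
    return tensor.permute(2, 0, 1).unsqueeze(0).to(DEVICE)

def postprocess(image_bgr, preds, input_size):
    # Map the boolean prediction back to a uint8 mask at the original resolution (assumed).
    mask = preds[0, 0].to(torch.uint8).cpu().numpy() * 255
    mask = cv2.resize(mask, (image_bgr.shape[1], image_bgr.shape[0]),
                      interpolation=cv2.INTER_NEAREST)
    overlay = cv2.bitwise_and(image_bgr, image_bgr, mask=mask)
    return overlay, mask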
Example #2
def predict():

    input_size = (416, 416)
    tf.app.flags.DEFINE_string('image_file', './images/person.jpg', 'image_path')
    FLAGS = tf.app.flags.FLAGS
    image_file = FLAGS.image_file
    image = cv2.imread(image_file)
    image_shape = image.shape[:2]
    image_cp = preprocess_image(image, input_size)

    images = tf.placeholder(tf.float32, [1, input_size[0], input_size[1], 3])
    detection_feat = darknet(images)
    feat_sizes = input_size[0] // 32, input_size[1] // 32
    detection_results = decode(detection_feat, feat_sizes, len(class_names), anchors)

    checkpoint_path = "./checkpoint_dir/yolo2_coco.ckpt"
    #checkpoint_path = "/Users/xiang/Downloads/DeepLearning_tutorials-master/ObjectDetections/yolo2/checkpoint_dir/yolo2_coco.ckpt"
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint_path)
        bboxes, obj_probs, class_probs = sess.run(detection_results, feed_dict={images: image_cp})

    bboxes, scores, class_inds = postprocess(bboxes, obj_probs, class_probs,
                                             image_shape=image_shape)
    img_detection = draw_detection(image, bboxes, scores, class_inds, class_names)
    cv2.imwrite("./res/detection.jpg", img_detection)
    cv2.imshow("detection results", img_detection)
    print('***** Click the window, then press any key to close! *****')

    cv2.waitKey(0)
    cv2.destroyAllWindows()
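`tf.app.flags`, `tf.placeholder`, and `tf.Session` are TensorFlow 1.x APIs; under TensorFlow 2.x this example only runs through the compatibility layer:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()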
Example #3
def main(args):
    with get_engine(args.engine_path, args.model_dir) as engine:
        with engine.create_execution_context() as context:
            origin_img = cv2.imread(args.image_path)
            t1 = time.time()
            img, (ratio_h, ratio_w) = preprocess(origin_img)
            cv2.imwrite("processed.jpg", img)
            h, w, _ = img.shape
            # hwc to chw
            img = img.transpose((2, 0, 1))
            # flatten the image into a 1D array
            img = img.ravel()
            context.set_binding_shape(0, (1, 3, h, w))
            # allocate buffers and create a stream.
            inputs, outputs, bindings, stream = common.allocate_buffers(
                engine, context)
            # copy to pagelocked memory
            np.copyto(inputs[0].host, img)
            # The common.do_inference function will return a list of outputs - we only have one in this case.
            [output] = common.do_inference_v2(context,
                                              bindings=bindings,
                                              inputs=inputs,
                                              outputs=outputs,
                                              stream=stream)
            # reshape 1D array to chw
            output = np.reshape(output, (6, h // 4, w // 4))
            # transpose chw to hwc
            output = output.transpose(1, 2, 0)
            boxes = postprocess(origin_img, output, ratio_h, ratio_w)
            t2 = time.time()
            print("total cost %fms" % ((t2 - t1) * 1000))
            draw_result(origin_img, boxes)
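`common` here is presumably the helper module shipped with NVIDIA's TensorRT Python samples. A hypothetical driver showing the argument interface this `main` expects (argument names taken from the code above, defaults assumed):

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--engine_path", required=True)
    parser.add_argument("--model_dir", default=None)
    parser.add_argument("--image_path", required=True)
    main(parser.parse_args())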
Example #4
def scrape_KCDC_citydo():
    """질병관리본부의 시도별 발생동향 수집"""
    html = requests.get(
        "http://ncov.mohw.go.kr/bdBoardList_Real.do?brdId=1&brdGubun=13&ncvContSeq=&contSeq=&board_id=&gubun=&fbclid=IwAR3NoNL_j1phitehSggDQedf7S165308xIEeG8ljACy-VRq-T5efcbcTK_s"
    )
    soup = BeautifulSoup(html.text, "html.parser")
    time = soup.select_one("p.info > span").text
    data = soup.select("table.num > tbody > tr")

    push = []
    new_data = {}
    for datum in data:
        name = datum.find("th").text
        number = datum.find_all("td")

        var_list = []
        for i in range(9):
            var_list.append(number[i].text)
        postproc = postprocess(var_list)

        new_data[name] = citydo.copy()
        new_data[name]["increasing"] = postproc[0]
        new_data[name]["cc_sum"] = postproc[1]
        new_data[name]["isolating"] = postproc[2]
        new_data[name]["recovered"] = postproc[3]
        new_data[name]["dead"] = postproc[4]
        new_data[name]["ch_sum"] = postproc[5]
        new_data[name]["ch_examined"] = postproc[6]
        new_data[name]["ch_negative"] = postproc[7]
        new_data[name]["total"] = postproc[8]
        new_data[name]["time"] = time
        push.append((name, new_data[name]))
    push_scraping_msg("scrape_domestic.py >> scrape_KCDC_citydo()", push)
    return new_data
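`postprocess` here receives the nine table-cell strings; a plausible sketch, assuming it strips thousands separators and casts numeric cells to int (the real helper may differ):

def postprocess(texts):
    out = []
    for t in texts:
        t = t.replace(",", "").strip()
        # Keep non-numeric cells (e.g. "-") as None.
        out.append(int(t) if t.lstrip("-").isdigit() else None)
    return out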
Example #5
def single_inference(model, image_dict, post_process=False):

    with torch.no_grad():
        image, mask = image_dict['image'], image_dict['mask']
        alpha_shape = image_dict['alpha_shape']
        image = image.cuda()
        mask = mask.cuda()
        pred = model(image, mask)
        alpha_pred_os1, alpha_pred_os4, alpha_pred_os8 = pred[
            'alpha_os1'], pred['alpha_os4'], pred['alpha_os8']

        ### refinement
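        # Coarse-to-fine cascade (assumed reading): start from the stride-8
        # prediction, then overwrite the uncertain band with the stride-4
        # output, and finally with the stride-1 output.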
        alpha_pred = alpha_pred_os8.clone().detach()
        weight_os4 = utils.get_unknown_tensor_from_pred(
            alpha_pred,
            rand_width=CONFIG.model.self_refine_width1,
            train_mode=False)
        alpha_pred[weight_os4 > 0] = alpha_pred_os4[weight_os4 > 0]
        weight_os1 = utils.get_unknown_tensor_from_pred(
            alpha_pred,
            rand_width=CONFIG.model.self_refine_width2,
            train_mode=False)
        alpha_pred[weight_os1 > 0] = alpha_pred_os1[weight_os1 > 0]

        h, w = alpha_shape
        alpha_pred = alpha_pred[0, 0, ...].data.cpu().numpy()
        if post_process:
            alpha_pred = utils.postprocess(alpha_pred)
        alpha_pred = alpha_pred * 255
        alpha_pred = alpha_pred.astype(np.uint8)
        alpha_pred = alpha_pred[32:h + 32, 32:w + 32]

        return alpha_pred
Example #6
 def __call__(self):
     image_bgr = self.get_image()
     tensor = self.conv_tensor(image_bgr)
     pred = pybenchmark.profile('inference')(model._inference)(self.inference, torch.autograd.Variable(tensor, volatile=True))
     rows, cols = pred['feature'].size()[-2:]
     iou = pred['iou'].data.contiguous().view(-1)
     yx_min, yx_max = (pred[key].data.view(-1, 2) for key in 'yx_min, yx_max'.split(', '))
     logits = get_logits(pred)
     prob = F.softmax(logits, -1).data.view(-1, logits.size(-1))
     ret = postprocess(self.config, iou, yx_min, yx_max, prob)
     image_result = image_bgr.copy()
     if ret is not None:
         iou, yx_min, yx_max, cls, score = ret
         try:
             scale = self.scale
         except AttributeError:
             scale = utils.ensure_device(torch.from_numpy(np.array(image_result.shape[:2], np.float32) / np.array([rows, cols], np.float32)))
             self.scale = scale
         yx_min, yx_max = ((t * scale).cpu().numpy().astype(np.int) for t in (yx_min, yx_max))
         image_result = self.draw_bbox(image_result, yx_min, yx_max, cls)
     cv2.imshow('detection', image_result)
     if self.args.output:
         self.writer.write(image_result)
     if cv2.waitKey(0 if self.args.pause else 1) in self.keys:
         root = os.path.join(self.model_dir, 'snapshot')
         os.makedirs(root, exist_ok=True)
         path = os.path.join(root, time.strftime(self.args.format))
         cv2.imwrite(path, image_bgr)
         logging.warning('image dumped into ' + path)
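`Variable(..., volatile=True)` is the pre-0.4 PyTorch inference idiom, and `np.int` was removed in NumPy 1.24; on current versions the equivalent calls would be:

# Inference without autograd (replaces Variable(tensor, volatile=True)):
with torch.no_grad():
    pred = pybenchmark.profile('inference')(model._inference)(self.inference, tensor)

# np.int was removed in NumPy 1.24; use a concrete integer type instead:
yx_min, yx_max = ((t * scale).cpu().numpy().astype(np.int64) for t in (yx_min, yx_max))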
Example #7
def segment(input_str, model_path='word_segmentation_model', seg_sep=' '):
    use_gpu = torch.cuda.is_available()
    if (use_gpu):
        print('Inference on GPU!')
    else:
        print('No GPU available, inference using CPU')
    model = torch.load(model_path)
    model.eval()
    x, skcc = preprocess(input_str, model)
    inputs = torch.tensor(x).unsqueeze(0).long()

    if (use_gpu):
        inputs = inputs.cuda()
    h = model.init_hidden(1)
    val_h = tuple([each.data for each in h])
    # get the output from the model
    pred, _ = model(inputs, val_h)
    if (use_gpu):
        pred = pred.cpu()  # move to cpu

    pred = torch.sigmoid(pred)

    pred[pred < 0.5] = 0.
    pred[pred >= 0.5] = 1.

    return postprocess(pred, skcc, seg_sep)
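The two masked assignments binarize the sigmoid output in place; an equivalent one-liner:

pred = (pred >= 0.5).float()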
Example #8
 def __call__(self):
     image_bgr = self.get_image()
     image_resized = self.resize(image_bgr, self.height, self.width)
     image = self.transform_image(image_resized)
     tensor = self.transform_tensor(image)
     tensor = utils.ensure_device(tensor.unsqueeze(0))
     pred = pybenchmark.profile('inference')(model._inference)(self.inference, torch.autograd.Variable(tensor, volatile=True))
     rows, cols = pred['feature'].size()[-2:]
     iou = pred['iou'].data.contiguous().view(-1)
     yx_min, yx_max = (pred[key].data.view(-1, 2) for key in 'yx_min, yx_max'.split(', '))
     logits = get_logits(pred)
     prob = F.softmax(logits, -1).data.view(-1, logits.size(-1))
     ret = postprocess(self.config, iou, yx_min, yx_max, prob)
     image_result = image_bgr.copy()
     if ret is not None:
         iou, yx_min, yx_max, cls, score = ret
         try:
             scale = self.scale
         except AttributeError:
             scale = utils.ensure_device(torch.from_numpy(np.array(image_result.shape[:2], np.float32) / np.array([rows, cols], np.float32)))
             self.scale = scale
         yx_min, yx_max = ((t * scale).cpu().numpy().astype(np.int) for t in (yx_min, yx_max))
         image_result = self.draw_bbox(image_result, yx_min, yx_max, cls)
     if self.args.output:
         self.writer.write(image_result)
     else:
         cv2.imshow('detection', image_result)
     if cv2.waitKey(0 if self.args.pause else 1) in self.keys:
         root = os.path.join(self.model_dir, 'snapshot')
         os.makedirs(root, exist_ok=True)
         path = os.path.join(root, time.strftime(self.args.format))
         cv2.imwrite(path, image_bgr)
         logging.warning('image dumped into ' + path)
Example #9
def scrape_worldOmeter(korea=True):
    """worldOmeter에서 세계 확진환자수, 격리해제수, 사망자수 수집

    Args:
        world: 대한민국 데이터만 수집하려면, True
               세계 데이터를 수집하려면, False

    Returns:
        (dict) 한국의 확진환자수(cc_sum), 격리해제수(recovered), 사망자수(dead)
    """
    html = requests.get("https://www.worldometers.info/coronavirus/")
    soup = BeautifulSoup(html.text, "html.parser")
    data = soup.select("#main_table_countries > tbody > tr")

    world_data = {}
    world_cc, world_recovered, world_dead = 0, 0, 0
    push = []
    for datum in data:
        country = datum.find_all("td")[0].text.strip()
        cc = datum.find_all("td")[1].text.strip()
        recovered = datum.find_all("td")[5].text.strip()
        dead = datum.find_all("td")[3].text.strip()
        postproc = postprocess([cc, recovered, dead])
        cc, recovered, dead = postproc[0], postproc[1], postproc[2]

        if cc:
            world_cc += cc
        if recovered:
            world_recovered += recovered
        if dead:
            world_dead += dead

        if korea:
            if country != "S. Korea":
                continue
            korea_patients = patients.copy()
            korea_patients["cc_sum"] = cc
            korea_patients["recovered"] = recovered
            korea_patients["dead"] = dead
            push.append(("대한민국", korea_patients))
            SlackHandler().add_scraping_msg(
                "scrape_korea.py >> scrape_worldOmeter()", push)
            return korea_patients

        world_data[country] = patients.copy()
        world_data[country]["cc_sum"] = cc
        world_data[country]["recovered"] = recovered
        world_data[country]["dead"] = dead
        push.append((country, world_data[country]))
        time.sleep(0.2)

    world_data["world"] = patients.copy()
    world_data["world"]["cc_sum"] = world_cc
    world_data["world"]["recovered"] = world_recovered
    world_data["world"]["dead"] = world_dead
    push.append(("world", world_data["world"]))

    SlackHandler().add_scraping_msg(
        "scrape_korea.py >> scrape_worldOmeter(korea=False)", push)
    return world_data
Example #10
def detect_objects(org_img, net):
    centernet_image_size = (512, 512)
    img = preprocess(org_img, centernet_image_size)
    net.predict(img)
    res = net.get_results()
    dets = postprocess([output[0] for output in res], (org_img.shape[1], org_img.shape[0]), K_VALUE, THRESHOLD)

    boxes = []
    scores = []
    cls_inds = []

    font_scale = 0.5
    font = cv2.FONT_HERSHEY_SIMPLEX

    for det in dets:
        # Make sure bboxes are not out of bounds
        xmin, ymin, xmax, ymax = det[:4].astype(np.int)
        xmin = max(0, xmin)
        ymin = max(0, ymin)
        xmax = min(org_img.shape[1], xmax)
        ymax = min(org_img.shape[0], ymax)

        boxes.append([xmin,ymin,xmax,ymax])
        scores.append(det[4])
        cls_inds.append(det[5])

    return boxes, scores, cls_inds
Example #11
 def synthesize_x1(self, X1_latent, parents=None):
     if isinstance(X1_latent, int):
         N = X1_latent
         X1_latent = np.random.uniform(size=(N, self.latent_dim[1]))
         X1_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim[1]))
     else:
         N = X1_latent.shape[0]
         X1_noise = np.zeros((N, self.noise_dim[1]))
     if parents is None:
         X0 = self.synthesize_x0(1)[0]
     else:
         X0 = parents[0]
     X0 = preprocess(X0)
     X0 = np.tile(X0, (N,1,1,1))
     X1 = self.sess.run(self.x1_fake, feed_dict={self.c1: X1_latent, self.z1: X1_noise, self.x0_fake: X0})
     return [postprocess(X1), postprocess(X0)]
Example #12
 def synthesize_assemblies(self, N):
     X0_latent = np.random.uniform(size=(N, self.latent_dim[0]))
     X0_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim[0]))
     X0 = self.sess.run(self.x0_fake,
                        feed_dict={
                            self.c0: X0_latent,
                            self.z0: X0_noise
                        })
     X1_latent = np.random.uniform(size=(N, self.latent_dim[1]))
     X1_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim[1]))
     X1 = self.sess.run(self.x1_fake,
                        feed_dict={
                            self.c1: X1_latent,
                            self.z1: X1_noise,
                            self.x0_fake: X0
                        })
     X2_latent = np.random.uniform(size=(N, self.latent_dim[2]))
     X2_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim[2]))
     X2 = self.sess.run(self.x2_fake,
                        feed_dict={
                            self.c2: X2_latent,
                            self.z2: X2_noise,
                            self.x0_fake: X0
                        })
     assemblies = np.concatenate((X0, X1, X2), axis=1)
     return postprocess(assemblies)
Example #13
def do_nucleus_sampling(models, das_test, cfg, absts):
    preds = run_nucleus_sampling(models, das_test, max_pred_len=60, cfg=cfg)
    preds = [[x for x in pred if x not in [START_TOK, END_TOK, PAD_TOK]]
             for pred in preds]
    if "res_save_format" in cfg:
        save_filename = cfg["res_save_format"].format(1)
    else:
        raise ValueError('Not saving files any where')

    if cfg.get("re-lexicalise", True):
        print("Applying abstract")
        post_abstr = apply_absts(absts, preds)
    else:
        print("Abstract not applied")
        post_abstr = preds

    save_path = os.path.join(RESULTS_DIR, save_filename)
    print("Saving to {}".format(save_path))
    parent = os.path.abspath(os.path.join(save_path, os.pardir))
    if not os.path.exists(parent):
        os.makedirs(parent)
    with open(save_path, "w+") as out_file:
        for pa in post_abstr:
            # out_file.write(" ".join(pa) + '\n')
            if cfg.get("re-lexicalise", True):
                out_file.write(postprocess(" ".join(pa)) + '\n')
            else:
                out_file.write(" ".join(pa) + '\n')
    print("Official bleu score:", test_res_official(save_filename))
Example #14
def main():
    input_size = (416,416)
    image_file = './yolo2_data/car.jpg'
    image = cv2.imread(image_file)
    image_shape = image.shape[:2] # keep only the spatial dims (h, w); drop channel=3

    # copy, resize to 416x416, normalize, and add a batch dimension at axis 0
    image_cp = preprocess_image(image,input_size)

    # [1] Feed the image through the darknet19 network to get the feature map, then
    # decode it into bounding boxes (xmin/xmax form), confidences, and class probabilities
    tf_image = tf.placeholder(tf.float32,[1,input_size[0],input_size[1],3])
    model_output = darknet_slim(tf_image) # feature map output by the darknet19 network
    output_sizes = input_size[0]//32, input_size[1]//32 # the feature map is the input downsampled by 32
    output_decoded = decode(model_output=model_output,output_sizes=output_sizes,
                               num_class=len(class_names),anchors=anchors)  # decode

    model_path = "models/model.ckpt"
    init_fn = slim.assign_from_checkpoint_fn(model_path,slim.get_variables())
    with tf.Session() as sess:
        init_fn(sess)
        bboxes,obj_probs,class_probs = sess.run(output_decoded,feed_dict={tf_image:image_cp})

    # [2] Filter the decoded bounding boxes with NMS (post-processing)
    bboxes,scores,class_max_index = postprocess(bboxes,obj_probs,class_probs,image_shape=image_shape)

    # [3] Draw the filtered bounding boxes
    img_detection = draw_detection(image, bboxes, scores, class_max_index, class_names)
    cv2.imwrite("./yolo2_data/detection.jpg", img_detection)
    print('YOLO_v2 detection is done!')
    cv2.imshow("detection_results", img_detection)
    cv2.waitKey(0)
Example #15
def synthesize(model, gan_type, code):
    """Synthesizes an image with the give code."""
    if gan_type == 'pggan':
        image = model(to_tensor(code))['image']
    elif gan_type in ['stylegan', 'stylegan2']:
        image = model.synthesis(to_tensor(code))['image']
    image = postprocess(image)[0]
    return image
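A typical `postprocess` for GAN output maps the [-1, 1] NCHW float tensor back to uint8 HWC images; a sketch of that common convention (the project's actual helper may differ):

import numpy as np

def postprocess(images):
    # [-1, 1] float NCHW tensor -> [0, 255] uint8 NHWC array (assumed convention).
    images = images.detach().cpu().numpy()
    images = (images + 1.0) * 255.0 / 2.0
    images = np.clip(images + 0.5, 0, 255).astype(np.uint8)
    return images.transpose(0, 2, 3, 1)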
Example #16
 def segment_crf(self, input_str, seg_sep=' '):
     ts = cleanup_str(input_str)
     kccs = seg_kcc(ts)
     features = create_kcc_features(kccs)
     preds = self.crfModel.predict([features])[0]
     preds = [float(p) for p in preds]
     seg = postprocess(preds, kccs, seg_sep)
     return seg
Example #17
def plot_channel_maps(args):
    # Line config
    if args.lineconfig:
        linecfg = ut.read_config(args.lineconfig[0])
    else:
        linecfg = None

    # Selected lines
    if args.lines:
        if linecfg is None:
            lines = None
        else:
            lines = args.lines
    elif linecfg is not None:
        lines = linecfg.sections()
    else:
        lines = None

    # Iterate over lines
    if linecfg is None:
        return _plot_single_channel_maps(args)
    else:
        plotbase = os.path.expanduser(args.plotname[0])
        for line in lines:
            # Assign values for this iteration
            if args.every[0] is None:
                args.every = [linecfg.getint(line, 'every', fallback=1)]
            args.chanran = map(int, linecfg.get(line, 'chanran').split())
            freq = linecfg.get(line, 'freq').split()
            freq = float(freq[0]) * u.Unit(freq[1])
            args.freq = [freq.to(u.GHz).value]

            # Plot
            fig = _plot_single_channel_maps(args)

            # Figure title
            fig.set_title(linecfg.get(line, 'title', fallback=''), ha='left')

            # Save fig
            plotname = os.path.splitext(plotbase)
            plotname = ('.%s.chanmap' % line).join(plotname)
            args.plotname = [plotname]
            args.logger.info('Saving figure: %s', args.plotname[0])
            ut.postprocess(fig, args)

    return None
Example #18
def main():
    input_size = (416, 416)
    image_file = "/home/zdq/darknet/data/1.jpg"
    image = cv2.imread(image_file)
    image_shape = image.shape[:2]
    image_cp = preprocess_image(image, input_size)

    images = tf.placeholder(tf.float32, [1, input_size[0], input_size[1], 3])
    detection_feat = darknet(images)
    feat_sizes = input_size[0] // 32, input_size[1] // 32
    detection_results = decode(detection_feat, feat_sizes, len(class_names),
                               anchors)

    checkpoint_path = "/home/zdq/YOLO/checkpoint_dir/yolo2_coco.ckpt"
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint_path)
        bboxes, obj_probs, class_probs = sess.run(detection_results,
                                                  feed_dict={images: image_cp})

    bboxes, scores, class_inds = postprocess(bboxes,
                                             obj_probs,
                                             class_probs,
                                             image_shape=image_shape)
    img_detection = draw_detection(image, bboxes, scores, class_inds,
                                   class_names)  # draw regressed boxes: gives each box's left/right x and bottom y pixel coordinates
    print('\n')

    # # Detect the lane-line pixels and store them in a dict
    # lane_cor = {}
    # # Manually selected ROI; needs revision
    # vertices = np.array([[(110, 194), (110, 0), (150, 0), (150, 194)]], dtype=np.int32)
    # roi = region_of_interest(line_img, vertices)
    # # cv2.imshow("roi", roi)
    # for i in range(0, (roi.shape)[0]):
    #     for j in range(0, (roi.shape)[1]):
    #         if roi[i, j, 2] == 255:         # roi[i, j, ch]: ch indexes the BGR channel
    #             lane_cor[i] = j
    # print("The coodinate of the detected_lane y:x")
    #
    # print(lane_cor)
    #
    # global box
    # if (utils.box[0] + m * (utils.box[2] - utils.box[0])) <= lane_cor[utils.box[3]] <= (utils.box[2] - m * (utils.box[2] - utils.box[0])):
    #     print("The car is on the solid line!!!")
    # else:
    #     print("The car is permitted~")

    # mix1 = weight_add(img_detection, line_img, alpha=0.7, belta=1, gamma=0.)
    # mixed = weight_add(img_detection,roi , alpha=0.7, belta=1, gamma=0.)
    # cv2.imshow("mix1", mix1)
    # cv2.imshow("mixed",mixed)
    #
    cv2.imshow("detection results", img_detection)
    cv2.imwrite("/home/zdq/PycharmProjects/YOLOv2/detection.jpg",
                img_detection)
    cv2.waitKey(0)
    return img_detection
Example #19
def predict(args, predictor):
    input_names = predictor.get_input_names()
    input_tensor = predictor.get_input_handle(input_names[0])

    output_names = predictor.get_output_names()
    output_tensor = predictor.get_output_handle(output_names[0])

    test_num = 500
    test_time = 0.0
    if not args.enable_benchmark:
        # for PaddleHubServing
        if args.hubserving:
            img_list = [args.image_file]
        # for predict only
        else:
            img_list = get_image_list(args.image_file)

        for idx, img_name in enumerate(img_list):
            if not args.hubserving:
                img = cv2.imread(img_name)[:, :, ::-1]
                assert img is not None, "Error in loading image: {}".format(
                    img_name)
            else:
                img = img_name
            inputs = utils.preprocess(img, args)
            inputs = np.expand_dims(inputs, axis=0).repeat(args.batch_size,
                                                           axis=0).copy()
            input_tensor.copy_from_cpu(inputs)

            predictor.run()

            output = output_tensor.copy_to_cpu()
            classes, scores = utils.postprocess(output, args)
            if args.hubserving:
                return classes, scores
            print("Current image file: {}".format(img_name))
            print("\ttop-1 class: {0}".format(classes[0]))
            print("\ttop-1 score: {0}".format(scores[0]))
    else:
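        # Benchmark mode: run test_num + 10 iterations and treat the first 10
        # as warm-up, excluding them from the accumulated latency below.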
        for i in range(0, test_num + 10):
            inputs = np.random.rand(args.batch_size, 3, 224,
                                    224).astype(np.float32)
            start_time = time.time()
            input_tensor.copy_from_cpu(inputs)

            predictor.run()

            output = output_tensor.copy_to_cpu()
            output = output.flatten()
            if i >= 10:
                test_time += time.time() - start_time
            time.sleep(0.01)  # sleep for T4 GPU

        fp_message = "FP16" if args.use_fp16 else "FP32"
        trt_msg = "using tensorrt" if args.use_tensorrt else "not using tensorrt"
        print("{0}\t{1}\t{2}\tbatch size: {3}\ttime(ms): {4}".format(
            args.model, trt_msg, fp_message, args.batch_size,
            1000 * test_time / test_num))
Example #20
def testit(sequenceLabeler):
    passed = 0
    for sample in samples:
        pred = postprocess(test(sequenceLabeler, sample[0]))
        if pred == sample[1]:
            passed += 1

    print "\n======== ACCURACY:[", (passed*1.0/len(samples))*100, "% ] ======"
    print "====================================\n"
Example #21
 def synthesize_x2(self, X2_latent, parents=None):
     if isinstance(X2_latent, int):
         N = X2_latent
         X2_latent = np.random.uniform(size=(N, self.latent_dim[2]))
         X2_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim[2]))
     else:
         N = X2_latent.shape[0]
         X2_noise = np.zeros((N, self.noise_dim[2]))
     if parents is None:
         X1, X0 = self.synthesize_x1(1)
     else:
         X0, X1 = parents
     X0 = preprocess(X0)
     X0 = np.tile(X0, (N,1,1,1))
     X1 = preprocess(X1)
     X1 = np.tile(X1, (N,1,1,1))
     X2 = self.sess.run(self.x2_fake, feed_dict={self.c2: X2_latent, self.z2: X2_noise, self.x1_fake: X1})
     return [postprocess(X2), postprocess(X0), postprocess(X1)]
Example #22
 def synthesize_x0(self, X0_latent):
     if isinstance(X0_latent, int):
         N = X0_latent
         X0_latent = np.random.uniform(size=(N, self.latent_dim[0]))
         X0_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim[0]))
     else:
         N = X0_latent.shape[0]
         X0_noise = np.zeros((N, self.noise_dim[0]))
     X0 = self.sess.run(self.x0_fake, feed_dict={self.c0: X0_latent, self.z0: X0_noise})
     return [postprocess(X0)]
Example #23
def segment_crf(input_str,
                model_path='sklearn_crf_model_90k-100i.sav',
                seg_sep=' '):
    ts = cleanup_str(input_str)
    kccs = seg_kcc(ts)
    features = create_kcc_features(kccs)
    loaded_model = pickle.load(open(model_path, 'rb'))
    preds = loaded_model.predict([features])[0]
    preds = [float(p) for p in preds]
    seg = postprocess(preds, kccs, '-')
    return seg
Example #24
    def test(self):
        epoch = self.scheduler.last_epoch + 1
        self.ckp.write_log('\nEvaluation:')
        self.model.eval()
        self.ckp.start_log(train=False)
        with torch.no_grad():
            tqdm_test = tqdm(self.loader_test, ncols=80)
            bic_PSNR = 0
            for idx_img, (lr, lre, hr, filename) in enumerate(tqdm_test):
                ycbcr_flag = False
                if self.args.n_colors == 1 and lr.size()[1] == 3:
                    # If n_colors is 1, split image into Y,Cb,Cr
                    ycbcr_flag = True
                    sr_cbcr = lre[:, 1:, :, :].to(self.device)
                    lre = lre[:, 0:1, :, :]
                    lr_cbcr = lr[:, 1:, :, :].to(self.device)
                    lr = lr[:, 0:1, :, :]
                    hr_cbcr = hr[:, 1:, :, :].to(self.device)
                    hr = hr[:, 0:1, :, :]

                filename = filename[0]
                lre = lre.to(self.device)
                lr = lr.to(self.device)
                hr = hr.to(self.device)
                sr = self.model(lr)
                PSNR = utils.calc_psnr(self.args, sr, hr)
                bic_PSNR += utils.calc_psnr(self.args, lre, hr)
                self.ckp.report_log(PSNR, train=False)
                lr, hr, sr = utils.postprocess(lr,
                                               hr,
                                               sr,
                                               rgb_range=self.args.rgb_range,
                                               ycbcr_flag=ycbcr_flag,
                                               device=self.device)

                if ycbcr_flag:
                    lr = torch.cat((lr, lr_cbcr), dim=1)
                    hr = torch.cat((hr, hr_cbcr), dim=1)
                    sr = torch.cat((sr, sr_cbcr), dim=1)

                save_list = [lr, hr, sr]
                if self.args.save_images:
                    self.ckp.save_images(filename, save_list, self.args.scale)

            self.ckp.end_log(len(self.loader_test), train=False)
            best = self.ckp.psnr_log.max(0)
            self.ckp.write_log(
                '[{}]\taverage PSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, self.ckp.psnr_log[-1], best[0],
                    best[1] + 1))
            print('Bicubic PSNR: {:.3f}'.format(bic_PSNR /
                                                len(self.loader_test)))
            if not self.args.test_only:
                self.ckp.save(self, epoch, is_best=(best[1] + 1 == epoch))
Example #25
 def synthesize(self, X_latent):
     if isinstance(X_latent, int):
         N = X_latent
         X_latent = np.random.uniform(size=(N, self.latent_dim))
         X_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim))
     else:
         N = X_latent.shape[0]
         X_noise = np.zeros((N, self.noise_dim))
     X0, X1, X2, X3 = self.sess.run(
         [self.x0_fake, self.x1_fake, self.x2_fake, self.x3_fake],
         feed_dict={
             self.c: X_latent,
             self.z: X_noise
         })
     return [
         postprocess(X0),
         postprocess(X1),
         postprocess(X2),
         postprocess(X3)
     ]
Example #26
 def synthesize_x3(self, X3_latent, parents=None):
     if isinstance(X3_latent, int):
         N = X3_latent
         X3_latent = np.random.uniform(size=(N, self.latent_dim[3]))
         X3_noise = np.random.normal(scale=0.5, size=(N, self.noise_dim[3]))
     else:
         N = X3_latent.shape[0]
         X3_noise = np.zeros((N, self.noise_dim[3]))
     if parents is None:
         X2, X0, X1 = self.synthesize_x2(1)
     else:
         X0, X1, X2 = parents
     X0 = preprocess(X0)
     X0 = np.tile(X0, (N,1,1,1))
     X1 = preprocess(X1)
     X1 = np.tile(X1, (N,1,1,1))
     X2 = preprocess(X2)
     X2 = np.tile(X2, (N,1,1,1))
     X3 = self.sess.run(self.x3_fake, feed_dict={self.c3: X3_latent, self.z3: X3_noise, self.x2_fake: X2})
     return [postprocess(X3), postprocess(X0), postprocess(X1), postprocess(X2)]
Example #27
    def stylize(self, sess, content_image, style_images, init_img):
        feed_dict = self._get_feed_dict(sess, content_image, style_images, \
                                            self.config['style_image_weights'])

        #Logging important stuff
        with tf.device('/device:cpu:0'):
            logger = self.init_logger(sess)
            logger.log_images("Content_image", [postprocess(content_image)])
            logger.log_images("Style_images", list(map(postprocess, style_images)))
            logger.log_images("Init_image", [postprocess(init_img)])

        # initializing optimizer
        if self.config['verbose']: print("Initializing optimizer")
        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
                self.total_loss, method='L-BFGS-B',
                options={'maxiter': self.config['max_iterations'],
                          'disp': 50})

        output = self.run_optimizer(sess, optimizer, logger, feed_dict, init_img)

        return output
Example #28
def camera_detect():

    tf.app.flags.DEFINE_string('video', False, 'Whether to output video file')
    FLAGS = tf.app.flags.FLAGS

    input_size = (416, 416)

    cv2.namedWindow("camera")
    capture = cv2.VideoCapture(0)            # open the camera
    success, image = capture.read()

    images = tf.placeholder(tf.float32, [1, input_size[0], input_size[1], 3])
    detection_feat = darknet(images)

    if FLAGS.video:
        fps = 1
        size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        videoWriter = cv2.VideoWriter('./test/result.avi',
        cv2.VideoWriter_fourcc('M','J', 'P', 'G'), fps, size)

    num = 1

    while success and cv2.waitKey(1) == -1:
        cv2.imshow('camera', image)
        image_shape = image.shape[:2]
        image_cp = preprocess_image(image, input_size)
        
        feat_sizes = input_size[0] // 32, input_size[1] // 32
        detection_results = decode(detection_feat, feat_sizes, len(class_names), anchors)

        checkpoint_path = "./checkpoint_dir/yolo2_coco.ckpt"
        #checkpoint_path = "/Users/xiang/Downloads/DeepLearning_tutorials-master/ObjectDetections/yolo2/checkpoint_dir/yolo2_coco.ckpt"
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, checkpoint_path)
            bboxes, obj_probs, class_probs = sess.run(detection_results, feed_dict={images: image_cp})

        bboxes, scores, class_inds = postprocess(bboxes, obj_probs, class_probs,
                                                 image_shape=image_shape)
        img_detection = draw_detection(image, bboxes, scores, class_inds, class_names)
        
        if FLAGS.video:
            videoWriter.write(img_detection)
        else:
            cv2.imwrite("./test/"+str(num)+"test.jpg", img_detection)

        success, image = capture.read()

        num += 1

    cv2.destroyWindow("camera")
    capture.release()
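Note that `decode`, `tf.train.Saver`, and the `tf.Session` restore above all sit inside the frame loop, so new graph ops are added and the checkpoint is reloaded on every frame. A sketch of the usual structure, hoisting them out of the loop (same names as above):

feat_sizes = input_size[0] // 32, input_size[1] // 32
detection_results = decode(detection_feat, feat_sizes, len(class_names), anchors)
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, "./checkpoint_dir/yolo2_coco.ckpt")
while success and cv2.waitKey(1) == -1:
    image_cp = preprocess_image(image, input_size)
    bboxes, obj_probs, class_probs = sess.run(detection_results, feed_dict={images: image_cp})
    success, image = capture.read()
sess.close()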
Example #29
    def store(self, directory, metadata):
        store_directory = Path(directory)
        store_directory.mkdir(exist_ok=True, parents=True)

        sample = torch.zeros([1, 100], dtype=torch.uint8)
        model_path = store_directory / self.ONNX_NAME

        torch.onnx.export(
            self.float().cpu(),
            sample.cpu(),
            model_path,
            input_names=["input"],
            output_names=["output"],
            dynamic_axes={
                "input": {0: "batch", 1: "length"},
                "output": {0: "batch", 1: "length"},
            },
        )
        postprocess(
            model_path,
            metadata,
        )
Example #30
def callback(msg):
    np_img = np.fromstring(msg.data, dtype=np.uint8).reshape((720, 1280, 3))
    np_img = np_img[:, :, :3]
    img_tensor = TF.to_tensor(np_img)
    img_tensor = TF.resize(img_tensor, [384, 640])
    img_tensor.unsqueeze_(0)
    img_tensor = img_tensor.cuda()

    model = EfficientDetBackbone(compound_coef=compound_coef,
                                 num_classes=len(obj_list),
                                 ratios=eval(params['anchors_ratios']),
                                 scales=eval(params['anchors_scales']))

    model.load_state_dict(
        torch.load(weights_path, map_location=torch.device('cpu')))

    model.requires_grad_(False)
    model.eval()
    if use_cuda:
        model = model.cuda()
    print("image data type", type(msg.data))
    with torch.no_grad():
        features, regression, classification, anchors = model(img_tensor)
    regressBoxes = BBoxTransform()
    clipBoxes = ClipBoxes()
    preds = postprocess(img_tensor, anchors, regression, classification,
                        regressBoxes, clipBoxes, threshold, nms_threshold)
    preds = preds[0]

    bb_msg = preds['rois']
    # pub = rospy.Publisher('/freicar_1/control', ControlCommand, queue_size=10)
    pub = rospy.Publisher('/freicar_1/bounding_box', bb, queue_size=10)
    # rospy.spin()
    msg_to_publish = bb()

    rate = rospy.Rate(10)  # 10hz
    # import pdb;    pdb.set_trace()
    while not rospy.is_shutdown():
        msg_to_publish.x1 = bb_msg[0][0]
        msg_to_publish.x2 = bb_msg[0][1]
        msg_to_publish.y1 = bb_msg[0][2]
        msg_to_publish.y2 = bb_msg[0][3]

        # import pdb; pdb.set_trace()
        pub.publish(msg_to_publish)
        rate.sleep()

    bgr = np.zeros((np_img.shape[0], np_img.shape[1], 3), dtype=np.uint8)
    cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR, bgr, 3)
    cv2.imshow('RGB image', bgr)
    cv2.waitKey(10)
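`np.fromstring` at the top of this callback is deprecated for binary input; on current NumPy the equivalent is `np.frombuffer` (with a copy, since it returns a read-only view):

np_img = np.frombuffer(msg.data, dtype=np.uint8).reshape((720, 1280, 3)).copy()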
Example #31
    def test(self):
        epoch = self.scheduler.last_epoch + 1
        self.ckp.write_log('\nEvaluation:')
        self.model.eval()
        self.ckp.start_log(train=False)
        with torch.no_grad():
            tqdm_test = tqdm(self.loader_test, ncols=80)
            for idx_img, (lr, _, filename) in enumerate(tqdm_test):
                ycbcr_flag = False
                filename = filename[0][0]
                lr = lr.to(self.device)
                frame1, frame2 = lr[:, 0], lr[:, 1]
                if self.args.n_colors == 1 and lr.size()[-3] == 3:
                    ycbcr_flag = True
                    frame1_cbcr = frame1[:, 1:]
                    frame2_cbcr = frame2[:, 1:]
                    frame1 = frame1[:, 0:1]
                    frame2 = frame2[:, 0:1]

                frame2_compensated, flow = self.model(frame1, frame2)

                PSNR = utils.calc_psnr(self.args, frame1, frame2_compensated)
                self.ckp.report_log(PSNR, train=False)
                frame1, frame2, frame2c = utils.postprocess(
                    frame1,
                    frame2,
                    frame2_compensated,
                    rgb_range=self.args.rgb_range,
                    ycbcr_flag=ycbcr_flag,
                    device=self.device)

                if ycbcr_flag:
                    frame1 = torch.cat((frame1, frame1_cbcr), dim=1)
                    frame2 = torch.cat((frame2, frame2_cbcr), dim=1)
                    frame2_cbcr_c = F.grid_sample(frame2_cbcr,
                                                  flow.permute(0, 2, 3, 1),
                                                  padding_mode='border')
                    frame2c = torch.cat((frame2c, frame2_cbcr_c), dim=1)

                save_list = [frame1, frame2, frame2c]
                if self.args.save_images and idx_img % 10 == 0:
                    self.ckp.save_images(filename, save_list, self.args.scale)

            self.ckp.end_log(len(self.loader_test), train=False)
            best = self.ckp.psnr_log.max(0)
            self.ckp.write_log(
                '[{}]\taverage PSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                    self.args.data_test, self.ckp.psnr_log[-1], best[0],
                    best[1] + 1))
            if not self.args.test_only:
                self.ckp.save(self, epoch, is_best=(best[1] + 1 == epoch))
Example #32
def main():
    vw = []
    sl = []
    while True:
        inp = raw_input("> ")

        inp = inp.strip()
        words = inp.split()

        cmd = words[0]
        if cmd == "/save":
            for temp in vw:
                temp.finish()
            sys.exit(1)
        if cmd == "/train":
            data = " ".join(words[1:]).strip()
            for i in range(10):
                for temp in sl:
                    temp.learn(preprocess([data]))
        elif cmd == "/query":
            data = " ".join(words[1:]).strip()
            output = set()
            for s in sl:
                output.add(postprocess(query(s, data)))
            for out in output:
                print "\t", out
        elif cmd == "/start":
            data = " ".join(words[1:]).strip()
            if os.path.isfile(data + ".1") and os.path.isfile(data + ".2") and os.path.isfile(
                            data + ".3") and os.path.isfile(data + ".4"):
                vw = [
                    pyvw.vw("--quiet -i " + data + ".1 -f "+data + ".1"),
                    pyvw.vw("--quiet -i " + data + ".2 -f "+data + ".2"),
                    pyvw.vw("--quiet -i " + data + ".3 -f "+data + ".3"),
                    pyvw.vw("--quiet -i " + data + ".4 -f "+data + ".4")
                ]
            else:
                vw = [
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".1"),
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".2"),
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".3"),
                    pyvw.vw("--search 3 --quiet --search_task hook --ring_size 2048 -f " + data + ".4")
                ]
            sl = [
                vw[0].init_search_task(SequenceLabeler),
                vw[1].init_search_task(SequenceLabeler2),
                vw[2].init_search_task(SequenceLabeler3),
                vw[3].init_search_task(SequenceLabeler4)
            ]
Example #33
 def stat_ap(self):
     cls_num = [0 for _ in self.category]
     cls_score = [np.array([], dtype=np.float32) for _ in self.category]
     cls_tp = [np.array([], dtype=np.bool) for _ in self.category]
     for data in tqdm.tqdm(self.loader):
         for key in data:
             t = data[key]
             if torch.is_tensor(t):
                 data[key] = utils.ensure_device(t)
         tensor = torch.autograd.Variable(data['tensor'], volatile=True)
         pred = pybenchmark.profile('inference')(model._inference)(
             self.inference, tensor)
         pred['iou'] = pred['iou'].contiguous()
         logits = get_logits(pred)
         pred['prob'] = F.softmax(logits, -1)
         for key in pred:
             pred[key] = pred[key].data
         if self.config.getboolean('eval', 'debug'):
             self.debug_data(data)
             self.debug_pred(pred)
         norm_bbox_data(data)
         norm_bbox_pred(pred)
         for path, difficult, image, data_yx_min, data_yx_max, data_cls, iou, yx_min, yx_max, prob in zip(
                 *(data[key] for key in 'path, difficult'.split(', ')),
                 *(torch.unbind(data[key])
                   for key in 'image, yx_min, yx_max, cls'.split(', ')),
                 *(torch.unbind(pred[key])
                   for key in 'iou, yx_min, yx_max, prob'.split(', '))):
             data_yx_min, data_yx_max, data_cls = filter_valid(
                 data_yx_min, data_yx_max, data_cls, difficult)
             for c in data_cls.cpu().numpy():
                 cls_num[c] += 1
             iou = iou.view(-1)
             yx_min, yx_max, prob = (t.view(-1, t.size(-1))
                                     for t in (yx_min, yx_max, prob))
             ret = postprocess(self.config, iou, yx_min, yx_max, prob)
             if ret is not None:
                 iou, yx_min, yx_max, cls, score = ret
                 for c in set(cls.cpu().numpy()):
                     c = int(c)  # PyTorch's bug
                     _score, tp = self.filter_cls(c, path, data_yx_min,
                                                  data_yx_max, data_cls,
                                                  yx_min, yx_max, cls,
                                                  score)
                     cls_score[c] = np.append(cls_score[c],
                                              _score.cpu().numpy())
                     cls_tp[c] = np.append(cls_tp[c], tp)
     return cls_num, cls_score, cls_tp
Example #34
 def stat_ap(self):
     cls_num = [0 for _ in self.category]
     cls_score = [np.array([], dtype=np.float32) for _ in self.category]
     cls_tp = [np.array([], dtype=np.bool) for _ in self.category]
     for data in tqdm.tqdm(self.loader):
         for key in data:
             t = data[key]
             if torch.is_tensor(t):
                 data[key] = utils.ensure_device(t)
         tensor = torch.autograd.Variable(data['tensor'], volatile=True)
         pred = pybenchmark.profile('inference')(model._inference)(self.inference, tensor)
         pred['iou'] = pred['iou'].contiguous()
         logits = get_logits(pred)
         pred['prob'] = F.softmax(logits, -1)
         for key in pred:
             pred[key] = pred[key].data
         if self.config.getboolean('eval', 'debug'):
             self.debug_data(data)
             self.debug_pred(pred)
         norm_bbox_data(data)
         norm_bbox_pred(pred)
         for path, difficult, image, data_yx_min, data_yx_max, data_cls, iou, yx_min, yx_max, prob in zip(*(data[key] for key in 'path, difficult'.split(', ')), *(torch.unbind(data[key]) for key in 'image, yx_min, yx_max, cls'.split(', ')), *(torch.unbind(pred[key]) for key in 'iou, yx_min, yx_max, prob'.split(', '))):
             data_yx_min, data_yx_max, data_cls = filter_valid(data_yx_min, data_yx_max, data_cls, difficult)
             for c in data_cls.cpu().numpy():
                 cls_num[c] += 1
             iou = iou.view(-1)
             yx_min, yx_max, prob = (t.view(-1, t.size(-1)) for t in (yx_min, yx_max, prob))
             ret = postprocess(self.config, iou, yx_min, yx_max, prob)
             if ret is not None:
                 iou, yx_min, yx_max, cls, score = ret
                 for c in set(cls.cpu().numpy()):
                     c = int(c)  # PyTorch's bug
                     _score, tp = self.filter_cls(c, path, data_yx_min, data_yx_max, data_cls, yx_min, yx_max, cls, score)
                     cls_score[c] = np.append(cls_score[c], _score.cpu().numpy())
                     cls_tp[c] = np.append(cls_tp[c], tp)
     return cls_num, cls_score, cls_tp
Example #35
"""
image = Image.open(image_file)
image_cp = image.resize(input_size, Image.BICUBIC)
image_cp = np.array(image_cp, dtype=np.float32)/255.0
image_cp = np.expand_dims(image_cp, 0)
#print(image_cp)
"""


images = tf.placeholder(tf.float32, [1, input_size[0], input_size[1], 3])
detection_feat = darknet(images)
feat_sizes = input_size[0] // 32, input_size[1] // 32
detection_results = decode(detection_feat, feat_sizes, len(class_names), anchors)

checkpoint_path = "./checkpoint_dir/yolo2_coco.ckpt"
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, checkpoint_path)
    bboxes, obj_probs, class_probs = sess.run(detection_results, feed_dict={images: image_cp})

bboxes, scores, class_inds = postprocess(bboxes, obj_probs, class_probs,
                                         image_shape=image_shape)
img_detection = draw_detection(image, bboxes, scores, class_inds, class_names)
cv2.imwrite("detection.jpg", img_detection)
cv2.imshow("detection results", img_detection)

cv2.waitKey(0)