Example #1
def main(args):
    # build the test-time dataset and a single-sample loader
    dataset_test = HEDDataset(csv_path=args.list,
                              root_dir=args.dir,
                              enableBatch=True,
                              enableInferMode=True)
    test_loader = DataLoader(dataset_test, batch_size=1, shuffle=False)

    # pick the device and run inference with the trained HED model
    device = torch.device("cpu" if args.no_cuda else "cuda:0")
    p = Predictor(HED, args.model, device, args.dst, test_loader)
    p.infer()
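This main expects an argparse-style namespace carrying list, dir, no_cuda, model, and dst attributes. A minimal sketch of that wiring, with flag names and help texts inferred from the attribute accesses above rather than taken from the original script:

import argparse

parser = argparse.ArgumentParser(description='run HED edge-detection inference')
parser.add_argument('--list', help='CSV file listing the test samples')
parser.add_argument('--dir', help='root directory of the input images')
parser.add_argument('--model', help='path to the trained HED checkpoint')
parser.add_argument('--dst', help='output directory for the predicted edge maps')
parser.add_argument('--no-cuda', action='store_true', help='force CPU inference')
main(parser.parse_args())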
Example #2
def predict():
    received_keys = sorted(list(request.form.keys()))
    if len(received_keys) > 1 or 'data' not in received_keys:
        err = 'Wrong request keys'
        return make_response(jsonify(error=err), 400)

    data = json.loads(request.form.get(received_keys[0]))
    df = pd.DataFrame.from_dict(data)

    predictor = Predictor()
    response_dict = {'prediction': predictor.predict(df).tolist()}

    return make_response(jsonify(response_dict), 200)
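The endpoint above accepts exactly one form field named 'data' holding JSON-encoded records. A hedged client-side sketch, assuming the Flask app is served locally on port 5000 (URL and payload are illustrative only):

import json
import requests

payload = {'data': json.dumps({'feature_a': [1.0, 2.0], 'feature_b': [3.0, 4.0]})}
resp = requests.post('http://localhost:5000/predict', data=payload)
print(resp.status_code, resp.json())  # e.g. 200 {'prediction': [...]}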
Example #3
def main():
    # load data
    dataset = CSQADataset()
    vocabs = dataset.get_vocabs()
    inference_data = dataset.get_inference_data()

    logger.info(f'Inference question type: {args.question_type}')
    logger.info('Inference data prepared')
    logger.info(f"Num of inference data: {len(inference_data)}")

    # load model
    model = CARTON(vocabs).to(DEVICE)

    logger.info(f"=> loading checkpoint '{args.model_path}'")
    if DEVICE.type == 'cpu':
        checkpoint = torch.load(f'{ROOT_PATH}/{args.model_path}',
                                encoding='latin1',
                                map_location='cpu')
    else:
        checkpoint = torch.load(f'{ROOT_PATH}/{args.model_path}',
                                encoding='latin1')
    args.start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    logger.info(
        f"=> loaded checkpoint '{args.model_path}' (epoch {checkpoint['epoch']})"
    )

    # construct actions
    predictor = Predictor(model, vocabs)
    Inference().construct_actions(inference_data, predictor)
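The explicit CPU/GPU branch around torch.load can usually be collapsed, since torch.load accepts a device as map_location; a minimal sketch under the same ROOT_PATH and args assumptions as above:

checkpoint = torch.load(f'{ROOT_PATH}/{args.model_path}',
                        encoding='latin1',
                        map_location=DEVICE)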
Example #4
def predict():
    received_keys = sorted(list(request.form.keys()))
    if len(received_keys) > 1 or 'data' not in received_keys:
        err = 'Wrong request keys'
        return make_response(jsonify(error=err), 400)

    data = json.loads(request.form.get(received_keys[0]))
    df = pd.DataFrame.from_dict(data)

    loader = DataLoader()
    loader.fit(df)
    processed_df = loader.load_data()
    predictor = Predictor()
    processed_df.to_csv('data/proc.csv', index=False)
    response_dict = {'prediction': predictor.predict(processed_df).tolist()}
    return make_response(jsonify(response_dict), 200)
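This variant differs from the plain endpoint shown earlier by fitting a DataLoader preprocessor on the incoming frame and writing the processed data to data/proc.csv before predicting; the CSV write looks like a debugging side effect rather than part of the response.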
Example #5
def main(input_size, model_path):
    demo = Predictor(model_path, input_size)

    while True:
        glob_pattern = os.path.join("/app/data/lsun_room/images/", '*.jpg')
        img_fn = random.choice(glob(glob_pattern))

        img = np.array(Image.open(img_fn).resize(input_size))

        output, label_map = demo.process(img)
        output_layout = output.copy()
        output_layout_edges = output.copy()

        # for each predicted label, keep only its largest blob and draw the
        # convex hull of its pixels as a filled polygon / outline
        for label in np.unique(label_map):
            label_map = discard_smallest_blobs(label_map,
                                               label,
                                               discard_value=-1,
                                               plot_diff=False)
            pts = np.flip(np.array(np.where(label_map == label)).T, axis=1)
            hull = ConvexHull(pts, qhull_options='Qt')
            #rot_angle, area, width, height, center_point, corner_points = minBoundingRect(pts[hull.vertices])
            corner_points = pts[hull.vertices]
            #corner_points = minimum_bounding_rectangle(pts)
            corner_points = corner_points.astype(np.int32)
            polygon_pts = corner_points
            #polygon_pts_rdp = rdp(corner_points, epsilon=10)
            cv2.fillPoly(output_layout, pts=[polygon_pts], color=COLORS[label])
            cv2.polylines(output_layout_edges, [polygon_pts], True,
                          COLORS[label])

        f, axarr = plt.subplots(1, 4)
        axarr[0].imshow(img, interpolation='bicubic')
        axarr[1].imshow(output.astype('float32'), interpolation='bicubic')
        axarr[2].imshow(output_layout.astype('uint8'), interpolation='bicubic')
        axarr[3].imshow(output_layout_edges.astype('uint8'),
                        interpolation='bicubic')
        plt.title(img_fn)
        plt.show()

        alpha = 0.9
        output = cv2.addWeighted(output, alpha, output_layout, 1 - alpha, 0)

        # note: scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is the modern replacement
        scipy.misc.imsave('output/super_res_output.jpg', output)
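discard_smallest_blobs is project-specific and not shown in this listing. One plausible implementation, which keeps only the largest connected blob per label and overwrites the rest with discard_value (the signature and behaviour here are assumptions, not the project's code):

import numpy as np
import cv2

def discard_smallest_blobs(label_map, label, discard_value=-1, plot_diff=False):
    # assumed behaviour: keep only the largest connected blob of `label`
    # and overwrite all smaller blobs with `discard_value`
    mask = (label_map == label).astype(np.uint8)
    num, components = cv2.connectedComponents(mask)
    if num <= 2:  # background plus at most one blob: nothing to discard
        return label_map
    sizes = [(components == i).sum() for i in range(1, num)]
    keep = 1 + int(np.argmax(sizes))  # index of the largest foreground component
    result = label_map.copy()
    result[(mask == 1) & (components != keep)] = discard_value
    return result  # plot_diff is accepted for signature parity only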
Example #6
def main(input_size, model_path):
    demo = Predictor(model_path, input_size)

    img = Image.open('/app/hubstairs.jpg').resize(input_size)

    output, label_map = demo.process(img)

    lines, output = gen_linear_layout_map(label_map, output)

    minmax = lambda x, y: (min(320, max(0, x)), min(320, max(0, y)))
    for pt1, pt2 in lines:
        # mark both endpoints of every layout line, then every pairwise line intersection
        cv2.circle(output, minmax(*pt1), 10, [255, 0, 0], thickness=1, lineType=8, shift=0)
        cv2.circle(output, minmax(*pt2), 10, [255, 0, 0], thickness=1, lineType=8, shift=0)
        for pt3, pt4 in lines:
            if pt1 != pt3 and pt2 != pt4:
                x, y = get_intersect(pt1, pt2, pt3, pt4)
                cv2.circle(output, (int(x), int(y)), 5, [255, 0, 0], thickness=1, lineType=8, shift=0)
                plt.imshow(output, cmap='gray', interpolation='bicubic')
                plt.xticks([]), plt.yticks([])  # hide tick values on the X and Y axes
                plt.show()

    scipy.misc.imsave('output/super_res_output.jpg', output)
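get_intersect is likewise not shown in this listing; a common way to implement it is via cross products of the two lines in homogeneous coordinates. The sketch below is an assumed implementation, not the project's code:

import numpy as np

def get_intersect(a1, a2, b1, b2):
    # intersection of the infinite lines through (a1, a2) and (b1, b2)
    s = np.vstack([a1, a2, b1, b2]).astype(float)
    h = np.hstack((s, np.ones((4, 1))))  # points in homogeneous coordinates
    l1 = np.cross(h[0], h[1])            # line through a1 and a2
    l2 = np.cross(h[2], h[3])            # line through b1 and b2
    x, y, z = np.cross(l1, l2)           # homogeneous intersection point
    if z == 0:                           # parallel lines do not intersect
        return float('inf'), float('inf')
    return x / z, y / z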
Example #7
def predict():
    #received_keys = sorted(list(request.form.keys()))
    #if len(received_keys) > 1 or 'data' not in received_keys:
    #    err = 'Wrong request keys'
    #    return make_response(jsonify(error=err), 400)

    #data = json.loads(request.form.get(received_keys[0]))

    #with open('settings/specifications.json') as f:
    #    specifications = json.load(f)
    #info = specifications['description']
    #x_columns = info['X']
    #y_column = info['y']
    #test_set = pd.read_csv(VAL_CSV, header=0)
    #x, y = test_set[x_columns], test_set[y_column]
    #data = {'data': json.dumps(x.to_dict())}
    #df = pd.DataFrame(columns=x_columns, data=x.values)
    #predictor = Predictor()
    #response_dict = {'prediction': predictor.predict(df).tolist()}
    PREDICT_ROUTE = "/predict"
    #response = requests.get(PREDICT_ROUTE, data=data)
    received_keys = sorted(list(request.form.keys()))
    if len(received_keys) > 1 or 'data' not in received_keys:
        err = 'Wrong request keys'
        return make_response(jsonify(error=err), 400)

    data = json.loads(request.form.get(received_keys[0]))
    df = pd.DataFrame.from_dict(data)

    loader = DataLoader()
    loader.fit(df)
    processed_df = loader.load_data()

    predictor = Predictor()
    response_dict = {'prediction': predictor.predict(processed_df).tolist()}

    return make_response(jsonify(response_dict), 200)
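Apart from the commented-out experimentation at the top (reading a validation CSV and posting to the /predict route directly), the live code is the same as the preprocessing endpoint in Example #4: a single 'data' form field, DataLoader preprocessing, then Predictor.predict on the processed frame.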
Example #8
def main():
    # load data
    dataset = CSQADataset()
    vocabs = dataset.get_vocabs()
    _, val_data, test_data = dataset.get_data()
    _, val_helper, test_helper = dataset.get_data_helper()

    # load model
    model = LASAGNE(vocabs).to(DEVICE)

    # define loss function (criterion)
    criterion = {
        LOGICAL_FORM: SingleTaskLoss,
        NER: SingleTaskLoss,
        COREF: SingleTaskLoss,
        GRAPH: SingleTaskLoss,
        MULTITASK: MultiTaskLoss
    }[args.task](ignore_index=vocabs[LOGICAL_FORM].stoi[PAD_TOKEN])

    logger.info(f"=> loading checkpoint '{args.model_path}'")
    if DEVICE.type == 'cpu':
        checkpoint = torch.load(f'{ROOT_PATH}/{args.model_path}',
                                encoding='latin1',
                                map_location='cpu')
    else:
        checkpoint = torch.load(f'{ROOT_PATH}/{args.model_path}',
                                encoding='latin1')
    args.start_epoch = checkpoint[EPOCH]
    model.load_state_dict(checkpoint[STATE_DICT])
    logger.info(
        f"=> loaded checkpoint '{args.model_path}' (epoch {checkpoint[EPOCH]})"
    )

    # prepare training and validation loader
    val_loader, test_loader = BucketIterator.splits(
        (val_data, test_data),
        batch_size=args.batch_size,
        sort_within_batch=False,
        sort_key=lambda x: len(x.input),
        device=DEVICE)

    logger.info('Loaders prepared.')
    logger.info(f"Validation data: {len(val_data.examples)}")
    logger.info(f"Test data: {len(test_data.examples)}")

    # calculate loss
    val_loss = test(val_loader, model, vocabs, criterion)
    logger.info(f'* Val Loss: {val_loss:.4f}')
    test_loss = test(test_loader, model, vocabs, criterion)
    logger.info(f'* Test Loss: {test_loss:.4f}')

    # calculate accuracy
    predictor = Predictor(model, vocabs, DEVICE)
    # val_scorer = Scorer()
    test_scorer = Scorer()
    # val_scorer.data_score(val_data.examples, val_helper, predictor)
    test_scorer.data_score(test_data.examples, test_helper, predictor)
    test_scorer.write_results()

    # log results
    # [['Val', val_scorer.results], ['Test', test_scorer.results]]
    for partition, results in [['Test', test_scorer.results]]:
        logger.info(f'* {partition} Data Results:')
        for question_type, question_type_results in results.items():
            logger.info(f'\t{question_type}:')
            for task, task_result in question_type_results.items():
                logger.info(f'\t\t{task}: {task_result.accuracy:.4f}')
Example #9
def model():
    predictions = Predictor()
    data = predictions.get_forecast()
    data = predictions.processing(data)
    return predictions.get_predictions(data)
Example #10
    parser.add_argument("path_to_image", help="mandatory location test image")
    parser.add_argument ("path_to_checkpoint", help="mandatory location model checkpoint")

    # Return top KKK
    parser.add_argument ('--top_k', action="store", type=int, default=5)
    # a mapping of categories to real names
    parser.add_argument ('--category_names', action="store", type=str, default="cat_to_name.json")
    # enable GPU inference
    parser.add_argument ('--gpu', action="store_true", default=False)

    # parse the arguments
    args = parser.parse_args ()
    # store arguments into variables

    path_to_image = args.path_to_image
    path_to_checkpoint = args.path_to_checkpoint
    top_k = args.top_k
    category_names = args.category_names
    gpu = args.gpu

    # process image
    Predictor(path_to_image, path_to_checkpoint, top_k, category_names, gpu)

    print(path_to_image)
    print(path_to_checkpoint)
    print(top_k)
    print(category_names)
    print(gpu)
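Given these definitions, the script would typically be run with two positional arguments plus optional flags, for example: python predict.py path/to/image.jpg checkpoint.pth --top_k 3 --gpu (the script name here is illustrative).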