Example 1
import argparse


# Excerpt of a larger get_parser(); the --eval and --task arguments used in
# __main__ below are defined in the elided part of the original file.
def get_parser():
    parser = argparse.ArgumentParser()  # reconstructed; setup elided in source
    parser.add_argument('--num_points',
                        type=int,
                        default=2048,
                        help='Num of points to use')
    parser.add_argument('--model_path',
                        type=str,
                        default='',
                        metavar='N',
                        help='Path to load model')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = get_parser()
    if not args.eval:
        if args.task == 'reconstruct':
            reconstruction = Reconstruction(args)
            reconstruction.run()
        elif args.task == 'classify':
            classification = Classification(args)
            classification.run()
        elif args.task == 'segment':
            segmentation = Segmentation(args)
            segmentation.run()
    else:
        inference = Inference(args)
        feature_dir = inference.run()
        svm = SVM(feature_dir)
        svm.run()
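
As a side note, the if/elif dispatch in Example 1 can be flattened into a task table. The sketch below is a hypothetical refactor reusing only the names the example already defines (get_parser and the project-local classes); it is not code from the original repository.

# Hypothetical refactor of Example 1's dispatch; assumes the same
# project-local classes (Reconstruction, Classification, Segmentation,
# Inference, SVM) are importable.
TASKS = {
    'reconstruct': Reconstruction,
    'classify': Classification,
    'segment': Segmentation,
}

if __name__ == '__main__':
    args = get_parser()
    if args.eval:
        feature_dir = Inference(args).run()
        SVM(feature_dir).run()
    else:
        TASKS[args.task](args).run()  # raises KeyError on an unknown task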
Example 2
                       str(prior["type"][idx]) + "_loc=" +
                       str(int(prior["location"][idx])) + "_scl=" +
                       str(int(prior["scale"][idx])) + ".h5")

    file_chains = dir_chains + name_chains
    file_csv = file_chains.replace(".h5", ".csv")  # swap the extension only

    if not os.path.isfile(file_chains):
        p1d = Inference(posterior=Posterior,
                        prior=prior["type"],
                        prior_loc=prior["location"],
                        prior_scale=prior["scale"],
                        n_walkers=n_walkers,
                        zero_point=zero_point)
        p1d.load_data(file_data, id_name=id_name)
        p1d.run(n_iter, file_chains=file_chains, tol_convergence=tolerance)

    #----------------- Analysis ---------------
    a1d = Analysis(
        n_dim=dimension,
        file_name=file_chains,
        id_name=id_name,
        dir_plots=dir_plots,
        tol_convergence=tolerance,
        statistic=statistic,
        quantiles=[0.05, 0.95],
        # transformation=None,
        names="2",
        transformation="ICRS2GAL",
    )
    a1d.plot_chains()
Example 3
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.8,  # 80% held out for testing
                                                    random_state=42)

import xgboost as xgb

xgb_model = xgb.XGBClassifier()
xgb_model = xgb_model.fit(X_train, y_train)

print("Test data accuracy of the xgb classifier is {:.2f}".format(
    xgb_model.score(X_test, y_test)))

from onnxmltools.convert import convert_xgboost
from onnxconverter_common.data_types import FloatTensorType

onnx_model = convert_xgboost(
    xgb_model, initial_types=[("input", FloatTensorType([1, 4]))])

with open("gbtree.onnx", "wb") as f:
    f.write(onnx_model.SerializeToString())

if __name__ == '__main__':
    from inference import Inference

    infer = Inference("gbtree.onnx")
    print(infer.run(X[:1]))
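
Examples 3-5 all call a project-local inference.Inference wrapper whose source is not shown here. Purely as a sketch, assuming the wrapper is a thin layer over onnxruntime, it might look like the following; the class and method names are kept only to match the calls above, and everything else is an assumption.

# Hypothetical sketch of the project-local Inference wrapper used in
# Examples 3-5; assumes a thin layer over onnxruntime.
import numpy as np
import onnxruntime as ort


class Inference:
    def __init__(self, model_path='model.onnx'):
        # Load the serialized ONNX graph into a CPU runtime session.
        self.session = ort.InferenceSession(
            model_path, providers=['CPUExecutionProvider'])
        self.input_name = self.session.get_inputs()[0].name

    def run(self, x):
        # onnxruntime expects float32 NumPy inputs keyed by input name.
        x = np.asarray(x, dtype=np.float32)
        return self.session.run(None, {self.input_name: x})[0]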
Example 4

import torch
import torch.nn as nn
import torch.nn.functional as F


class NN(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(4, 16)
        self.fc2 = nn.Linear(16, 3)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)  # raw logits; F.softmax is left to the caller/loss
        return x


net = NN()
print(net)

# Export via tracing with a dummy input matching X's shape (a batch of 128).
torch.onnx.export(net,
                  torch.randn(128, 4),
                  './iris.onnx',
                  verbose=True,
                  input_names=['input_name'],
                  output_names=['output_name'])

if __name__ == '__main__':
    from inference import Inference

    # X is assumed to hold the iris feature matrix loaded in the elided
    # part of the original file.
    infer = Inference()
    print(infer.run(X[:128])[:5])
Example 5
import cv2
from inference import Inference

im1 = cv2.imread('bed (1).jpg')
infer = Inference()

output = infer.run(im1)
cv2.imshow("output", output[0] / 255.0)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example 6
from utils import config

# CUDA_VISIBLE_DEVICES must be set before any CUDA-aware module is
# imported, hence the interleaved imports.
GPU_LIST = config.INFERENCE_GPUS
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(n) for n in GPU_LIST)
from inference import Inference


if __name__ == '__main__':
    pg = Inference(data_dir='/path/to/data/', data_list='/path/to/list',
                   class_num=2, result_dir='./result', use_level=1)
    pg.run()