Example 1
def main():
    parser = argparse.ArgumentParser(
        description='compares two point cloud models by Chamfer distance')
    parser.add_argument('target_model_path',
                        type=str,
                        help='path to point cloud file of target model')
    parser.add_argument('check_model_path',
                        type=str,
                        help='path to point cloud file of check model')
    parser.add_argument(
        '-s',
        '--subset',
        type=int,
        help='use only subset of points of target model for distance estimation'
    )
    args = parser.parse_args()

    target_model = read_model(args.target_model_path)
    check_model = read_model(args.check_model_path)

    subset_size = 500
    if args.subset:
        subset_size = args.subset
        print('subset size: %s' % subset_size)
    else:
        print('default subset size: %s' % subset_size)

    compare(target_model, check_model, subset_size)
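The project's `compare` helper is not shown here. Below is a minimal sketch of the Chamfer distance it presumably computes, using scipy's cKDTree for nearest-neighbour queries; the function name and the assumption that both models are (N, 3) point arrays are mine, not the project's actual API.

from scipy.spatial import cKDTree

def chamfer_distance(points_a, points_b):
    """Symmetric Chamfer distance between two (N, 3) point arrays."""
    # For each point, take the distance to its nearest neighbour in the
    # other cloud, then sum the two directional averages.
    dist_ab, _ = cKDTree(points_b).query(points_a)
    dist_ba, _ = cKDTree(points_a).query(points_b)
    return dist_ab.mean() + dist_ba.mean()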
Example 2
def solve_graph(graph,
                weights,
                complexity_width,
                timeout: int = 10,
                debug=False):
    cnfpath = "temp-cw.cnf"
    with open(cnfpath, 'w') as cnffile:
        enc = SvEncodingWithComplexity(cnffile, graph, weights, debug)
        enc.encode_sat(complexity_width)
    print(f"enc: {enc.__class__.__name__}, ({enc.num_clauses} clauses)")
    if debug: print("encoding done")
    base_cmd = ["glucose", "-verb=0", "-model"]
    cmd = base_cmd + [cnfpath, f"-cpu-lim={timeout}"]
    start = now()
    proc = subprocess.run(cmd,
                          universal_newlines=True,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    runtime = now() - start
    output = proc.stdout
    model = read_model(output)
    dec = TwbnDecoder(enc, -1, model, "")
    elim_order = dec.get_elim_order()
    # if USE_DD:
    #     enc.debug_dd_counter(model, elim_order)
    # else:
    #     enc.debug_counters(model, elim_order)
    tri = dec.get_triangulated().to_undirected()
    td = TreeDecomposition(tri, elim_order, width=-1)
    if DRAWING:
        pos = graphviz_layout(dec.get_triangulated(), prog='dot')
        nx.draw(dec.get_triangulated(), pos, with_labels=True)
        plt.show()
        td.draw()
    return td
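`read_model` here parses the solver's standard output. A plausible sketch, assuming glucose's `-model` flag emits a DIMACS-style value line (`v 1 -2 3 ... 0`); returning the model as a set of literals is my assumption.

def read_model(output: str) -> set:
    # Collect the literals from 'v ' lines; the trailing 0 is a line
    # terminator, not a literal.
    model = set()
    for line in output.splitlines():
        if line.startswith("v "):
            model.update(lit for lit in map(int, line[2:].split()) if lit != 0)
    return model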
Example 3
 def __init__(self, data_file, features_file, sep, model_file, plot, epochs,
              lr):
     self.model_file = model_file
     self.plot = plot
     self.epochs = epochs
     self.model = {}
     self.ranges = {}
     self.lr = lr
     self.acc = []
     self.loss = []
     self.val_acc = []
     self.val_loss = []
     # Read data
     self.data, self.labels = read_data(data_file, sep)
     if len(self.data) == 0:
         print("Error: no valid data found in %s" % data_file)
         exit(1)
     self.features = self.get_features(features_file)
     self.classes_column = "Hogwarts House"
     self.classes = classes_list(self.data, self.classes_column)
     # Read model
     if len(model_file):
         self.model, _, _ = read_model(model_file, self.classes)
     self.X_train = []
     self.X_val = []
     self.Y_train = []
     self.Y_val = []
     self.thetas = []
     self.curr_class = ""
Example 4
def main():
    parser = argparse.ArgumentParser(description='Chamfer distance-based KNN')
    parser.add_argument('target_model_path',
                        type=str,
                        help='path to point cloud file of target model')
    parser.add_argument('models_dir_path',
                        type=str,
                        help='path to the check models directory')
    parser.add_argument(
        '-s',
        '--subset',
        type=int,
        help='use only subset of points of target model for KNN estimation')
    args = parser.parse_args()

    target_model = read_model(args.target_model_path)

    models = read_models(args.models_dir_path)

    subset_size = 100
    if args.subset:
        subset_size = args.subset
        print('subset size: %s' % subset_size)
    else:
        print('default subset size: %s' % subset_size)

    dists = find_nearest(target_model, models, subset_size)

    print('K Nearest Models to %s:' % args.target_model_path)
    for dist in dists:
        print('%s:\t%s' % dist)
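A hedged sketch of what `find_nearest` might do, reusing the `chamfer_distance` sketch from Example 1; the `models` mapping, the `.points` attribute, the subsetting strategy, and the default `k` are all assumptions.

def find_nearest(target, models, subset_size, k=5):
    # Rank every candidate by Chamfer distance to (a subset of) the
    # target and return the k closest as (name, distance) pairs.
    target_pts = target.points[:subset_size]
    dists = [(name, chamfer_distance(target_pts, m.points))
             for name, m in models.items()]
    return sorted(dists, key=lambda pair: pair[1])[:k]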
Example 5
def solve_bn(data: BNData, treewidth: int, input_file: str, forced_arcs=None,
             forced_cliques=None, pset_acyc=None, timeout: int = TIMEOUT,
             domain_sizes=None, debug=False):
    cnfpath = "temp.cnf"
    with open(cnfpath, 'w') as cnffile:
        if domain_sizes is None:
            enc = TwbnEncoding(data, cnffile, forced_arcs, forced_cliques,
                               pset_acyc, debug)
        else:
            enc = CwbnEncoding(data, cnffile, forced_arcs, forced_cliques,
                               pset_acyc, debug)
            enc.use_dd = True
            enc.set_weights(weights_from_domain_sizes(domain_sizes))
        enc.encode_sat(treewidth)
    if debug: print("encoding done")
    if debug: print(f"maxsat stats: {len(enc.vars)} vars, {enc.num_clauses} clauses")
    #sys.exit()
    base_cmd = [os.path.join(SOLVER_DIR, "uwrmaxsat"), "-m", "-v0"]
    cmd = base_cmd + [cnfpath, f"-cpu-lim={timeout}"]
    start = now()
    try:
        output = subprocess.check_output(cmd, universal_newlines=True,
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        # copy over problematic cnf file
        errcnf = "error.cnf"
        shutil.copy(cnfpath, errcnf)
        if err.stdout is not None:
            errfilename = "uwrmaxsat-err.log"
            with open(errfilename, 'w') as errfile:
                errfile.write(err.stdout)
                #raise RuntimeError(f"error while running uwrmaxsat on {errcnf}"
                #                   f"\nrc: {err.returncode}, check {errfilename}")
                print(f"error while running uwrmaxsat on {errcnf}"
                      f"\nrc: {err.returncode}, check {errfilename}")
                raise NoSolutionException("nonzero returncode")
        else:
            #raise RuntimeError(f"error while running uwrmaxsat on {errcnf}"
            #                   f"\nrc: {err.returncode}, no stdout captured")
            print(f"error while running uwrmaxsat on {errcnf}"
                  f"\nrc: {err.returncode}, no stdout captured")
            raise NoSolutionException("nonzero returncode")
    else:  # if no error while maxsat solving
        runtime = now() - start
        model = read_model(output)
        dec = TwbnDecoder(enc, treewidth, model, input_file)
        return dec.get_bn()
Example 6
def predict_tm_score(feature_row):
    global MODEL
    if MODEL is None:
        MODEL = utils.read_model()

    # Start with the intercept
    tm_score = MODEL.get('intercept', 0.0)

    # Get rid of the tm-score, if it's in the dictionary
    actual_tm_score = feature_row.pop('tm-score', None)

    # Compute the sum of weight vector values and feature values
    for seq_num in feature_row.keys():
        for feat_type in feature_row[seq_num].keys():
            for feat_name in feature_row[seq_num][feat_type].keys():
                tm_score += feature_row[seq_num][feat_type][feat_name] * \
                    MODEL[seq_num][feat_type][feat_name]

    return tm_score
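The weight vector is evidently a nested dictionary mirroring the feature rows, plus an 'intercept' key. A hypothetical instance showing the shape the triple loop expects; all keys and values here are illustrative only.

MODEL = {
    'intercept': 0.5,
    'seq1': {'structural': {'helix_frac': 0.8, 'sheet_frac': -0.3}},
}
feature_row = {'seq1': {'structural': {'helix_frac': 0.6, 'sheet_frac': 0.1}}}
# predict_tm_score(feature_row) -> 0.5 + 0.6*0.8 + 0.1*(-0.3) = 0.95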
Example 7
def main():
    try:
        model = utils.read_model()
        data = pd.read_csv(p + "/data/weather_data.csv")
    except Exception:
        print("Problem loading data")
        return -1
    
    last_row = data[-1:]
    
    if not utils.is_last_data_is_new(last_row):
        print("last data not new")
        return -1
    
    prevision = model.make_prevision(last_row)
    prevision.display()
    
    print("done!")
    return 0
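`utils.is_last_data_is_new` is not shown. A minimal sketch under the assumption that the last row carries a timestamp column; the 'date' column name and the one-hour freshness window are invented for illustration.

import pandas as pd

def is_last_data_is_new(row, max_age=pd.Timedelta(hours=1)):
    # Treat the row as fresh if its timestamp is within max_age of now.
    ts = pd.to_datetime(row['date'].iloc[0])
    return pd.Timestamp.now() - ts <= max_age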
Example 8
def predict(args):
    """
    This is the demonstration for the batch prediction
    :param args: parameter for model related config
    """

    addr, port, rank, world_size = extract_xgbooost_cluster_env()

    dmatrix, y_test = read_predict_data(rank, world_size, None)

    model_path = args.model_path
    booster = read_model("oss", model_path, args)

    preds = booster.predict(dmatrix)

    best_preds = np.asarray([np.argmax(line) for line in preds])
    score = precision_score(y_test, best_preds, average='macro')

    logging.info("Predict precision: %f", score)
Example 9
def main():
    logger.info("Make prevision started")
    try:
        model = utils.read_model()
        data = utils.read_data()
    except Exception:
        logger.critical("Problem in loading data")
        return -1
    
    last_row = data[-1:]
    
    if not utils.is_last_data_is_new(last_row):
        logger.warning("last data not new")
        return -1
    
    prevision = model.make_prevision(last_row)
    prevision.display()
    
    logger.info("Make prevision ended withou errors")
    return 0
Example 10
 def __init__(self, data_file, sep, model_file):
     self.model_file = model_file
     self.model = {}
     self.ranges = {}
     self.features = []
     self.classes = []
     # Read data
     self.data, _ = read_data(data_file, sep)
     if len(self.data) == 0:
         print("Error: no valid data found in %s" % data_file)
         exit(1)
     # Read model
     if len(model_file):
         self.model, self.ranges, self.classes = read_model(
             model_file, self.classes)
         if len(self.model) == 0:
             print("Error: no model found in %s" % model_file)
             exit(1)
     self.features = list(self.ranges)
Example 11
import argparse

import open3d as o3d
import numpy as np

from model import Model
from utils import read_model, get_subset_model, view_model


parser = argparse.ArgumentParser()
parser.add_argument('path', type=str,
                    help='path to point cloud file')
parser.add_argument('-s', '--subset', type=int,
                    help='use only subset of points')
args = parser.parse_args()


model = read_model(args.path)

print(model.pcd)

if args.subset:
    model = get_subset_model(model, args.subset)

view_model(model)
Example 12
from keras.datasets import cifar10
from keras.utils import np_utils
from utils import visualize_dense_activations, visualize_activations, read_model, get_class_data
from keras.utils import plot_model
from vis.utils import utils
import numpy as np
from keras import backend as K
from matplotlib import pyplot as plt

K.set_image_dim_ordering('th')

# 1. read the model & pretrained weights
model = read_model("saved/final_model.txt", "saved/final_weights.txt")

# 2. print summary and plot model architecture
model.summary()
plot_model(model, to_file='model.png')

# 3. get some data to visualize through activations & normalize
(X_train, y_train), (X_test, y_test_orig) = cifar10.load_data()
X_test = X_test.astype('float32')
X_test = X_test / 255.0
y_test = np_utils.to_categorical(y_test_orig)

# 4. visualize
conv_layer_idx = utils.find_layer_idx(model, "conv2d_1")
conv_layer_idx2 = utils.find_layer_idx(model, "conv2d_2")
final_layer_idx = utils.find_layer_idx(model, "dense_2")

airplanes = get_class_data(X_test, y_test_orig, 0)
ships = get_class_data(X_test, y_test_orig, 8)
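`get_class_data` presumably filters the test set down to a single CIFAR-10 class. A sketch assuming labels arrive as the (N, 1) integer array that cifar10.load_data() returns.

def get_class_data(X, y, class_idx):
    # A boolean mask over the flattened label column selects one class.
    mask = (y.flatten() == class_idx)
    return X[mask]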
Example 13
 def get(self, network, model):
     return utils.read_model(data_path, network, model)
Example 14
def test_download_model(model_path, args):

    return read_model(type="local", model_path=model_path, args=args)
Example 15
import os
import sys
sys.path.insert(0, '../')
from flask import Flask
from utils import preprocess, read_model, read_data, create_inverted_index, search

app = Flask(__name__)

df_profiles = None
word2vec_profiles = None
inverted_index_profiles = None

@app.route('/ping')
def ping():
    return 'pong!'

@app.route('/preprocess')
def preprocess_string():
    words = 'Programador Python avanzando reconocimiento de KPIs y certificación ORACLE'
    return preprocess(words)

@app.route('/query')
def query():
    query = 'Programador Python avanzando reconocimiento de KPIs y certificación ORACLE'
    return str(search(query, word2vec_profiles, inverted_index_profiles))

if __name__ == '__main__':
    root_dir = '../../_data/s3/'
    df_profiles = read_data(root_dir + 'df_profiles_25k.csv')
    word2vec_profiles = read_model(root_dir + 'word2vec_profiles_25k') 
    inverted_index_profiles = create_inverted_index(df_profiles, word2vec_profiles)
    app.run()
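`create_inverted_index` is only imported above. A hedged sketch, assuming a gensim-style word2vec model, a text column named 'profile', and that the project's own preprocess() yields a list of tokens; all three are assumptions.

from collections import defaultdict

def create_inverted_index(df, w2v):
    # Map each token the embedding model knows to the set of row ids
    # whose profile text contains it.
    index = defaultdict(set)
    for row_id, text in enumerate(df['profile']):
        for token in preprocess(text):
            if token in w2v.wv:
                index[token].add(row_id)
    return index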
Example 16
 def get(self, network, model):
     return utils.read_model(data_path, network, model)
Example 17
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from utils import visualize_dense_activations, visualize_activations, read_model, get_class_data, save_model
from keras.utils import plot_model
from keras.optimizers import SGD
from vis.utils import utils
import numpy as np
from keras import backend as K
from matplotlib import pyplot as plt

K.set_image_dim_ordering('th')

# 1. read the model & pretrained weights
model = read_model("model.txt", "weights.txt")

# 2. print summary and plot model architecture
model.summary()

# 3. get some data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
augment = False
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
if augment:
    datagen = ImageDataGenerator(featurewise_center=False,
                                 featurewise_std_normalization=False,
                                 samplewise_center=False,
                                 samplewise_std_normalization=False,
                                 zca_whitening=False,
                                 rotation_range=3,
                                 shear_range=0.1,