Example #1
def main():
    cfg.setup()
    cfg.sv = "pcie.v"
    empty = cfg.build_design(cfg.sv, get_substs("GSR", "ENABLED"))
    words, enums = get_params.get_params(
        os.path.join(os.environ['RADIANTDIR'], "cae_library", "simulation",
                     "verilog", "lifcl", "PCIE.v"))
    # force words with non-zero default to zero...
    defaults = []
    for n, w, d in words:
        if int(d, 2) != 0:
            defaults.append((n, "0b{}".format("0" * w)))

    def per_word(w):
        name, width, default = w
        nonrouting.fuzz_ip_word_setting(
            cfg,
            name,
            width,
            lambda b: get_substs(name, str(bin2bin(b))),
            "",
            default=[d == "1" for d in reversed(default)])

    fuzzloops.parallel_foreach(words, per_word)

    def per_enum(e):
        name, options = e
        nonrouting.fuzz_ip_enum_setting(cfg, empty, name, options,
                                        lambda x: get_substs(name, x), "")

    fuzzloops.parallel_foreach(enums, per_enum)
Example #2
def main(argv):
    
    params = get_params() # check get_params.py in the same directory to see the parameters
    
    try:
      opts, args = getopt.getopt(argv,"hr:o:s:c:g:",["root=","out=","saliency_model=","caffe_path=", "gpu="])
    except getopt.GetoptError:
      print 'ERROR'
      sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
              print 'saliency.py -r <root> -o <out> -s <saliency_model> -c <caffe_path> -g <gpu>'
              sys.exit()
        elif opt in ("-r", "--root"):
              params['root'] = arg
        elif opt in ("-o", "--out"):
              params['out'] = arg
        elif opt in ("-s", "--saliency_model"):
              params['saliency_model'] = arg
        elif opt in ("-c", "--caffe_path"):
              params['caffe_path'] = arg
        elif opt in ("-g", "--gpu"):
              params['gpu'] = arg;
              
    sys.path.insert(0,os.path.join(params['caffe_path'],'python'))
    import caffe
    
    compute = 'True'  # sys.argv[1]; set to 'true' to compute saliency maps or 'false' to only visualize

    if compute == 'true' or compute == 'True':
        
        
        deploy_file = os.path.join(params['saliency_model'],'deploy.prototxt')
        model_file = os.path.join(params['saliency_model'],'model.caffemodel')
        # I am using the mean file from caffenet...but I guess we could use a grey image as well ?
        mean_file = '/media/HDD_2TB/mcarne/keyframe-extractor/src/Saliency/deep/meanfile.npy'
        
        if params['gpu'] == True:
            caffe.set_mode_gpu()
            print 'GPU mode selected'
        else: 
            caffe.set_mode_cpu()
            print 'CPU mode selected'
            
        net = caffe.Classifier(deploy_file, model_file, mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2,1,0),raw_scale=255)
        if not os.path.exists(params['out']):
            os.makedirs(params['out'])
        
        for imagepath in glob.glob(params['root']+"/*.jpg"):
            print "Processing image..."
            scores = net.predict([caffe.io.load_image(imagepath)])    
            feat = net.blobs['deconv1'].data
            #feat = np.reshape(feat, (10,4096))
            print feat, np.shape(feat)
            #meanfeat = np.average( feat, axis = 0 ) 
            # saves to disk
            fout = params['out']+'/'+os.path.splitext(os.path.basename(imagepath))[0];
            pickle.dump(feat,open(fout+'.p','wb'))
            scipy.io.savemat(fout+'.mat', mdict={'isal': feat})
Example #3
def test_length():
    with tf.Session() as session:
        filename = 'tiny-shakespeare.txt'
        batch_generator = BatchGenerator(filename, 100, 2)

        texts = ['hello tensorflow', 'goodluck you']
        for index, text in enumerate(texts):
            print "[{}] '{}': {} characters".format(index + 1, text, len(text))

        batch = batch_generator(texts)
        batch = tf.convert_to_tensor(batch, tf.float32)

        pm = PredictiveCodingModel(get_params(), batch)
        print session.run([pm.length, pm.mask])
Example #4
def stop_main(api_index, f, info_list,max_workers):
    logger.debug('Setting authentication info')
    auth_info = authentication_cli(fp = f, info_list = info_list, api_index = api_index)
    
    logger.debug('loading external data')
    ext_info = external_data(auth_info, info_list = info_list, fp = f)
    
    logger.debug('Getting cluster information')
    params = get_params.get_params(ext_info, auth_info, info_list = info_list, f = f, api_index = api_index)
    params()
    params.show_cluster_info()
    
    index, cluster_id = get_cluster_id.get_cluster_id(params.cluster_info_all, info_list, f, api_index)
    
    if (index == True):
        logger.debug("Start stopping the cluster : " + str(cluster_id))
        printout("Start stopping the cluster : " + str(cluster_id), info_type = 0, info_list = info_list, fp = f)
        stop_obj = stop_class.stop_sacluster(params.cluster_info_all[cluster_id], auth_info, max_workers, fp = f, info_list = info_list, api_index = api_index)
        stop_obj()
        logger.debug("Finished stopping the cluster : " + str(cluster_id))
        printout("Finished stopping the cluster : " + str(cluster_id), info_type = 0, info_list = info_list, fp = f)
        
    else:
        logger.debug('There are no clusters to stop')
        
Example #5
def main():
    cfg.setup()
    cfg.sv = "dphy.v"
    empty = cfg.build_design(cfg.sv, dict(k="GSR", v="ENABLED"))
    words, enums = get_params.get_params(
        os.path.join(os.environ['RADIANTDIR'], "cae_library", "simulation",
                     "verilog", "lifcl", "DPHY.v"))

    def per_word(w):
        name, width, default = w
        nonrouting.fuzz_ip_word_setting(
            cfg, name, width, lambda b: dict(k=name, v=str(bin2bin(b))), "")

    fuzzloops.parallel_foreach(words, per_word)

    def per_enum(e):
        name, options = e
        nonrouting.fuzz_ip_enum_setting(cfg, empty, name, options,
                                        lambda x: dict(k=name, v=x), "")

    fuzzloops.parallel_foreach(enums, per_enum)
Example #6
def test_get_params_success(permission_table, permission_table_name):
    os.environ["dynamodb_permissions_table_name"] = permission_table_name
    event = {
        "requestContext": {
            "authorizer": {
                "jwt": {
                    "claims": {
                        "email": "*****@*****.**",
                        "profile": "private",
                        "nickname": "alice",
                        "custom:is_superuser": "******",
                    }
                }
            }
        },
    }

    response = get_params(event, context=None)

    assert "instance_types" in response
    assert "operating_systems" not in response
    assert "max_days_to_expiry" in response
    assert "max_instance_count" in response
    assert "max_extension_count" in response
Example #7
def ps_main(api_index, f, info_list):
    logger.debug('Setting authentication info')
    auth_info = authentication_cli(fp=f,
                                   info_list=info_list,
                                   api_index=api_index)

    logger.debug('loading external data')
    ext_info = external_data(auth_info, info_list=info_list, fp=f)

    logger.debug('Checking cloud states')
    ext_info = check_cloud_state(ext_info,
                                 auth_info,
                                 info_list=info_list,
                                 fp=f,
                                 api_index=api_index)

    logger.debug('Getting cluster information')
    params = get_params.get_params(ext_info,
                                   auth_info,
                                   info_list=info_list,
                                   f=f,
                                   api_index=api_index)
    params()
    params.show_cluster_info()
Example #8
from get_params import get_params
import sys
import os
import numpy as np
import matplotlib.pylab as plt
import pickle
from select_samples import find_coordinates
import cv2

''' Obtain Saliency map for query images using SalNet '''

params = get_params() # check get_params.py in the same directory to see the parameters

sys.path.insert(0,os.path.join(params['caffe_path'],'python'))
import caffe

def init_net(params):
    
    deploy_file = os.path.join(params['saliency_model'],'deploy.prototxt')
    model_file = os.path.join(params['saliency_model'],'model.caffemodel')
    
    # I am using the mean file from caffenet...but I guess we could use a grey image as well ?
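    # (Hypothetical alternative, not in the original code: a flat grey mean,
    # e.g. mean=np.full(3, 127.0) per BGR channel, could be passed to
    # caffe.Classifier below instead of the loaded mean file.)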
    mean_file = '/imatge/asalvador/work/chalearn/models/bvlc_reference_caffenet/meanfile.npy'
    
    caffe.set_mode_gpu()
    net = caffe.Classifier(deploy_file, model_file, mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2,1,0),raw_scale=255)
    
    return net
    
def get_saliency(net,params):
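    # (The body of get_saliency is truncated in this listing.) A rough sketch,
    # assuming the same pattern as the saliency script above: run the net on
    # each query image and read the map from the 'deconv1' blob, e.g.
    #     scores = net.predict([caffe.io.load_image(imagepath)])
    #     saliency = net.blobs['deconv1'].data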
    
Example #9
from Sampling import Sampling
from get_params import get_params

if __name__ == "__main__":
    print(Sampling(get_params())('I', 500))
Example #10
def modify_main(api_index, f, info_list, max_workers):
    logger.debug('Setting authentication info')
    auth_info = authentication_cli(fp=f,
                                   info_list=info_list,
                                   api_index=api_index)

    logger.debug('loading external data')
    ext_info = external_data(auth_info, info_list=info_list, fp=f)

    logger.debug('Checking cloud states')
    ext_info = check_cloud_state(ext_info,
                                 auth_info,
                                 info_list=info_list,
                                 fp=f,
                                 api_index=api_index)

    logger.debug('Getting cluster information')
    params = get_params.get_params(ext_info,
                                   auth_info,
                                   info_list=info_list,
                                   f=f,
                                   api_index=api_index)
    params()

    #import pprint
    #pprint.pprint(params.cluster_info_all)
    #sys.exit()

    params.show_cluster_info()

    index, cluster_id = get_cluster_id.get_cluster_id(params.cluster_info_all,
                                                      info_list, f, api_index)

    state, obj = params.checking_status(cluster_id)
    if (state == False):
        conf_stop_process(info_list, f)

        logger.debug("Start stopping the cluster : " + str(cluster_id))
        printout("Start stopping the cluster : " + str(cluster_id),
                 info_type=0,
                 info_list=info_list,
                 fp=f)
        stop_obj = stop_sacluster(params.cluster_info_all[cluster_id],
                                  auth_info,
                                  max_workers,
                                  fp=f,
                                  info_list=info_list,
                                  api_index=api_index)
        stop_obj()
        logger.debug("Finished stopping the cluster : " + str(cluster_id))
        printout("Finished stopping the cluster : " + str(cluster_id),
                 info_type=0,
                 info_list=info_list,
                 fp=f)

    if (index == True):
        logger.debug("Starting to modify the cluster : " + str(cluster_id))
        printout("Starting to modify the cluster : " + str(cluster_id),
                 info_type=0,
                 info_list=info_list,
                 fp=f)
        mod_obj = modify_sacluster(params.cluster_info_all[cluster_id],
                                   cluster_id,
                                   auth_info,
                                   ext_info,
                                   fp=f,
                                   info_list=info_list,
                                   api_index=api_index,
                                   max_workers=max_workers)
        mod_obj()
Example #11
def click(self):
    spn = self.lineEdit.text() + ',' + self.lineEdit_2.text()
    coords = self.lineEdit_3.text() + ',' + self.lineEdit_4.text()
    self.final(get_params(coords, spn))
Example #12
def get_stats(feat_model_string, rsyncing, toy):
    from get_params import get_params
    get_params(cfg, feat_model_string, rsyncing, toy=toy)
Example #13
from Training import Training
from get_params import get_params

if __name__ == "__main__":
    Training(get_params())()
Example #14
def show_picture():
    response = requests.get(map_api_server, params=get_params(','.join(coords), str(spn) + ',' + str(spn)))
    file = open('file.png', 'wb')
    file.write(response.content)
    screen.blit(pygame.image.load('file.png'), (0, 0))
Example #15
def delete_main(api_index, f, info_list, max_workers):
    logger.debug('Setting authentication info')
    auth_info = authentication_cli(fp=f,
                                   info_list=info_list,
                                   api_index=api_index)

    logger.debug('loading external data')
    ext_info = external_data(auth_info, info_list=info_list, fp=f)

    logger.debug('Getting cluster information')
    params = get_params.get_params(ext_info,
                                   auth_info,
                                   info_list=info_list,
                                   f=f,
                                   api_index=api_index)
    params()
    params.show_cluster_info()

    index, cluster_id = get_cluster_id.get_cluster_id(params.cluster_info_all,
                                                      info_list, f, api_index)

    temp = conf_pattern_2("Delete the selected cluster?", ["yes", "no"],
                          "no",
                          info_list=info_list,
                          fp=f)

    state, obj = params.checking_status(cluster_id)

    if temp == "yes":
        if (state == False):
            conf_stop_process(info_list, f)

            logger.debug("Start stopping the cluster : " + str(cluster_id))
            printout("Start stopping the cluster : " + str(cluster_id),
                     info_type=0,
                     info_list=info_list,
                     fp=f)
            stop_obj = stop_sacluster(params.cluster_info_all[cluster_id],
                                      auth_info,
                                      max_workers,
                                      fp=f,
                                      info_list=info_list,
                                      api_index=api_index)
            stop_obj()
            logger.debug("Finished stopping the cluster : " + str(cluster_id))
            printout("Finished stopping the cluster : " + str(cluster_id),
                     info_type=0,
                     info_list=info_list,
                     fp=f)

        if (index == True):
            logger.debug("Start deleting the cluster : " + str(cluster_id))
            printout("Start deleting the cluster : " + str(cluster_id),
                     info_type=0,
                     info_list=info_list,
                     fp=f)
            delete_obj = delete_class.delete_sacluster(
                params.cluster_info_all[cluster_id],
                auth_info,
                max_workers,
                fp=f,
                info_list=info_list,
                api_index=api_index)
            delete_obj()
            logger.debug("Finished deleting the cluster : " + str(cluster_id))
            printout("Finished deleting the cluster : " + str(cluster_id),
                     info_type=0,
                     info_list=info_list,
                     fp=f)
        else:
            logger.debug('There are no clusters to stop')
    else:
        printout("Stop processing.", info_type=0, info_list=info_list, fp=f)
        sys.exit()
Example #16
from get_params import get_params
import os
import convert_feats
import sys
import pickle
import numpy as np
import glob
from sklearn.metrics.pairwise import euclidean_distances
from mat_to_csv import mat_to_np
import time
""" OLD: Compute distances for extracted descriptors to query. This function is not longer used, distance computation is performed right after feature extraction. """
params = get_params()

DISTANCES_PATH  = os.path.join(params['root'], '6_distances',params['net'],params['database'] + params['year'],params['query_name'])
FEAT_PATH = params['root'] + '5_descriptors/' + params['net'] + '/' + params['database'] + params['year']
MAT_PATH = params['root'] + '4_object_proposals/' + params['region_detector'] + '/mat/' + params['database'] + params['year']

QUERY_FEAT_PATH = os.path.join(params['root'],'5_descriptors',params['net'],'query' + params['year'],params['query_name'])
BASELINE_RANKING = os.path.join(params['root'], '2_baseline',params['baseline'],params['query_name'] + '.rank')

if params['database'] == 'db' or params['database'] == 'full':
    IMAGE_PATH =  params['root'] + '1_images/' + params['database']
else:
    IMAGE_PATH =  params['root'] + '1_images/' + params['database'] + params['year']


def frames_in_shot(shot_name,path_to_frames):

    frame_list = glob.glob( os.path.join(path_to_frames,shot_name) + '/*.jpg')

    return frame_list
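
# A minimal sketch (not part of the original script) of the query-to-shot
# distance computation described in the module docstring above; the file
# naming for the pickled descriptors is assumed.
def distances_for_shot(shot_name):
    with open(QUERY_FEAT_PATH + '.p', 'rb') as f:
        query_feats = pickle.load(f)
    with open(os.path.join(FEAT_PATH, shot_name + '.p'), 'rb') as f:
        shot_feats = pickle.load(f)
    # rows correspond to query descriptors, columns to shot descriptors
    return euclidean_distances(query_feats, shot_feats)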
Example #17
# Date Feb 6, 2019
# 
# Main file for the intersection environment simulator

import math
import get_params
import traff
import numpy as np
import matplotlib.pyplot as plt

params = get_params.get_params()    # call a user-defined function
w_lane = params.w_lane
v_nominal = params.v_nominal
num_cars = params.num_cars
l_car = params.l_car
w_car = params.w_car
max_episode = params.max_episode
t_step_DT = params.t_step_DT

for episode in range(1,params.max_episode+1):    # the simulation will be run 1 time
    #plt.close('all')               # corresponds to MATLAB's 'close all', but needs checking
    # Traffic initialization
    traffic = traff.initial()       # call a user-defined function, x,y, v, target traffic

    # Ego car 
    x_car = 0.5*w_lane       # ego vehicle's initial x position
    y_car = -4*w_lane        # ego vehicle's initial y position
    orientation_car = math.pi/2     # ego vehicle's initial heading angle (yaw angle in the paper)
    v_car = v_nominal        # ego vehicle's initial speed
    target_car = 2                  # target car is 2 (the opponent)
Example #18
def index(lti=lti):
    """ Page d'acceuil, permet d'authentifier l'utilisateur.

    :param lti: the `lti` object from `pylti`
    :return: index page for lti provider
    """
    database = MySQLdb.connect(host="127.0.0.1",
                               port=3306,
                               user="******",
                               passwd="",
                               db="moodle")
    cHandler = database.cursor()
    cHandler.execute("SHOW TABLES LIKE 'mdl_comp_recommendation'")
    condition = cHandler.fetchall()
    if condition == ():
        return redirect(url_for('upload_exo_2'))
    if not os.path.isfile('data.json'):
        get_params(lti)
    cours_id = lti.user_id[0]
    cHandler.execute(
        "SELECT id_theme,theme FROM mdl_theme_recommendation WHERE cours_id=%s",
        cours_id)
    themes = cHandler.fetchall()
    for items in themes:
        algo.ajouterTheme(int(items[0]), items[1])
    cHandler.execute(
        "SELECT id_savoir_faire, savoir_faire FROM mdl_comp_recommendation WHERE cours_id=%s",
        cours_id)
    comp = cHandler.fetchall()
    for items in comp:
        cHandler.execute(
            "SELECT id_theme FROM mdl_comp_recommendation WHERE id_savoir_faire=%s",
            items[0])
        id_theme = cHandler.fetchall()
        if not id_theme == ():
            algo.ajouterCompetence(int(items[0]), items[1], id_theme[0][0], [])
    with open('data.json') as data_file:
        data = json.load(data_file)
    for studs in data["eleves"]:
        algo.ajouterEtudiant(studs['id'], studs['prenom'], studs['nom'],
                             studs['comp'], studs['res'])
    i = 0
    while not get_exo(i) == "" or i < 256:
        text = get_exo(i)
        cHandler.execute(
            "SELECT DISTINCT id_theme FROM mdl_exos_recommendation WHERE num_exo=%s",
            i)
        id_theme = cHandler.fetchall()
        cHandler.execute(
            "SELECT DISTINCT m1.id_savoir_faire FROM mdl_exos_recommendation m1 JOIN mdl_comp_recommendation m2 ON m1.id_savoir_faire=m2.id_savoir_faire WHERE m1.num_exo=%s",
            i)
        id_comp = cHandler.fetchall()
        tab_id_theme = []
        tab_id_comp = []
        for ids in id_theme:
            tab_id_theme.append(int(ids[0]))
        for ids in id_comp:
            tab_id_comp.append(int(ids[0]))
        algo.ajouterExercice(i, text, "", tab_id_theme, tab_id_comp, {}, 1)
        i += 1
    return render_template('index.html', lti=lti)
Example #19
def main_loop(argv):
    print "INTO PYTHON CODE"
    #%%
    # get parameters from UI interface
    Auto, patient, sensors, classifer, TH_Features, TH_params, LDA_Features, Features = get_params(
        sys.argv)

    # DEBUG::
    '''
    Auto=2
    patient="1"
    sensors=6
    classifer=2
    LDA_Features=[0,5,6]
    Features=[0,5,6,7]
    Auto=2
    print Auto
   '''

    if (int(Auto) == 2):
        sensors = 9
        LDA_Features = []
        Features = np.asarray([0, 1, 2, 4, 5, 6, 7, 8])
        print "Auto configuration is enabled"
        print "The following features will be used for LDA"
        print Features
        sys.stdout.flush()
    else:
        print "Manual mode is enabled"
        print "The following features will be used for LDA"
        print LDA_Features
        sys.stdout.flush()

    print Features
    print LDA_Features
    print TH_Features
    print TH_params
    print sensors
    print Auto
    '''
    TH_Features = [7]
    TH_params = [5]
    '''

    # create metadata for the training
    metadata = {
        "stepsize": 32,
        "windowsize": 128,
        "samplingrate": 64,
        "sensors": int(sensors),
        "classifier": int(classifer)
    }

    #load data from DAPHNET dataset
    dirname = os.path.dirname(__file__)
    if (int(patient) == 2):
        name = os.path.join(dirname, "../dataset/S02R02.txt")
    else:
        name1 = ("../dataset/S0" + patient + "R01.txt")
        name = os.path.join(dirname, name1)

    print "Reading data from dataset..."
    sys.stdout.flush()

    data = np.loadtxt(name, usecols=range(0, 11))

    #%%
    '''
    # Relabel the data according to its window
    print "Labeling data..."
    sys.stdout.flush()
    labels = labeling.relabel(data[:,10],metadata)
    data[:,10] = labels
    '''

    # training data
    # 1. lda training data
    # 2. normal walking gait
    # 3. stop status
    Pos = np.array([[[780, 3140], [1500, 2200], [31000, 32000]],
                    [[200, 1200], [100, 3000], [16000, 17000]],
                    [[270, 4000], [31800, 32200], [30000, 31000]],
                    [[600, 980], [1000, 1700], [600, 800]],
                    [[500, 2500], [5000, 12000], [500, 4500]],
                    [[600, 1800], [42400, 43800], [31000, 34000]],
                    [[650, 1200], [2000, 4300], [6200, 6400]],
                    [[1680, 2600], [1000, 5000], [7000, 7200]]])

    pos = Pos[int(patient) - 1, :, :]

    #%% start training process
    print "Start training..."
    sys.stdout.flush()
    W, dtth, TG, mask, step_depth, thresholds = sa.self_adaptive(
        data, pos, metadata, Features, TH_Features, TH_params, LDA_Features,
        Auto)

    #%% finished training, save parameters into file
    # fill data
    print "writing parameters into file..."
    sys.stdout.flush()
    Step_depth = np.zeros((9, ), dtype=float)
    Thresholds = np.zeros((9, 9), dtype=float)
    Paras = np.zeros((9 * 9, ), dtype=float)

    lens = len(step_depth)
    for i in range(lens):
        Step_depth[i] = step_depth[i]

    #print thresholds.shape
    x, lens = thresholds.shape
    for i in range(lens):
        for j in range(9):
            Thresholds[j][i] = thresholds[j][i]

    #print W.shape

    counter = 0
    for i in range(9):
        for sensor in range(9):
            if (mask[i, sensor] == 1):
                Paras[i * 9 + sensor] = W[counter]
                counter = counter + 1

    #print "finishing filling W..."

    ## save into file
    dirname = os.path.dirname(__file__)
    name = os.path.join(dirname, "Parameters/P1T.txt")

    #print name
    with open(name, 'w') as f:
        for item in Step_depth:
            f.write("%s\n" % item)
        for row in Thresholds:
            np.savetxt(f, row)
        for row in mask:
            np.savetxt(f, row)
        for item in Paras:
            f.write("%s\n" % item)

        f.write("%s\n" % dtth)
        f.write("%s\n" % TG)

    print "finishing training..."
    sys.stdout.flush()

    f.close()
    return
Example #20
import json
from datetime import datetime as dt
from pred import build, predict
from models import ForecastModel
from get_params import get_params

subdir = 'downloads/'  # Default hyperparams subdirectory

data_param_fname = 'data_params.json'
model_param_fname = 'model_params.json'
feat_fname = 'feature_names.json'
weights_fname = 'weights.h5'

print('Getting model dependencies...')
get_params(subdir, data_param_fname, model_param_fname, feat_fname,
           weights_fname)

data_param = json.load(open(subdir + data_param_fname, 'r'))
model_param = json.load(open(subdir + model_param_fname, 'r'))
feature_names = json.load(open(subdir + feat_fname, 'r'))

print('Building model by passing in real data...')
pred_model = ForecastModel(model_param['input_shape'],
                           model_param['residual_shape'],
                           model_param['output_shape'],
                           model_param['return_sequences'],
                           model_param['rnn_units'])
pred_model = build(pred_model, data_param, model_param)

print('Loading weights...')
pred_model.load_weights(subdir + weights_fname)
Example #21
from get_params import get_params
import sys
import os
import numpy as np
import matplotlib.pylab as plt
import pickle
from select_samples import find_coordinates
import cv2
''' Obtain Saliency map for query images using SalNet '''

params = get_params(
)  # check get_params.py in the same directory to see the parameters

sys.path.insert(0, os.path.join(params['caffe_path'], 'python'))
import caffe


def init_net(params):

    deploy_file = os.path.join(params['saliency_model'], 'deploy.prototxt')
    model_file = os.path.join(params['saliency_model'], 'model.caffemodel')

    # I am using the mean file from caffenet...but I guess we could use a grey image as well ?
    mean_file = '/imatge/asalvador/work/chalearn/models/bvlc_reference_caffenet/meanfile.npy'

    caffe.set_mode_gpu()
    net = caffe.Classifier(deploy_file,
                           model_file,
                           mean=np.load(mean_file).mean(1).mean(1),
                           channel_swap=(2, 1, 0),
                           raw_scale=255)
Example #22
from Sampling import Sampling
from get_params import get_params

print(Sampling(get_params())('We', 500))
Example #23
from Training import Training
from get_params import get_params

Training(
    get_params(),
    cache_dir = './arxiv',
    categories = [
        'Machine Learning',
        'Neural and Evolutionary Computing',
        'Optimization'
    ],
    keywords = [
        'neural',
        'network',
        'deep'
    ]
    )()
Example #24
def teachers_class(lti=lti):
    [coursename, results] = get_params(lti)

    return render_template('displayStuds2.html',
                           results=results,
                           coursename=coursename)
Example #25
# -*- coding: cp1252 -*-
import numpy as np
from get_params import get_params
from build_database import build_database
from get_features import get_features
from train_classifier import train_classifier
from classify import classify
from eval_classification import eval_classification
from eval_classification import plot_confusion_matrix
import warnings
warnings.filterwarnings("ignore")

# Parameter extraction
params = get_params()
# Database creation
params['split'] = 'train'
build_database(params)
params['split'] = 'val'
build_database(params)
# Feature extraction
get_features(params)
# Train a classification model
train_classifier(params)
# Classification
classify(params)
# Classification evaluation
f1, precision, recall, accuracy, cm, labels = eval_classification(params)
print "Measures:\n"
print f1
print "-F1:", np.mean(f1)
print "-Precision:", np.mean(precision)
Example #26
## Example script to get Teff, [Fe/H], and [Ti/Fe] from a NIRSPEC-1 M dwarf spectrum

import pickle
from correct_throughput import correct_throughput
from get_params import get_params

## Read in example spectrum
with open('PM_I18007+2933.pkl', 'rb') as file:
    inspec = pickle.load(file)
    
## Relative-flux-calibrate and shift to v=0
## This step can take a few minutes, unset quiet=True
## to see proof that it is making progress.
wave, flam, fvar = correct_throughput(inspec, quiet=True)

## Get parameters
teff, feh, tife = get_params(wave, flam)

print('The "true" parameters for this star are: \n'
      'Teff = 3510 K, [Fe/H] = -0.080, [Ti/Fe] = +0.050 \n \n'
      'The inferred parameters are: \n'
      'Teff = {:4.0f} K, [Fe/H] = {:+6.3f}, [Ti/Fe] = {:+6.3f}'.format(
      teff, feh, tife))