Example #1
    def _set_weight(self, feature, weight):
        logger.debug("Setting feature: %s from weight: %s to weight: %s",
                     util.pp(feature), util.pp(self._get_weight(feature)),
                     util.pp(weight))
        assert not np.isnan(weight.mean)
        assert weight.variance >= 0.0
        self._weights[util.serialize_feature(feature)] = weight
Example #2
def middle_edge(s1, s2, sigma=-5, matrix='blosum62'):
    """Find the middle edge of the alignment graph of s1 and s2."""
    half = math.floor(len(s2) / 2)
    mat = read_score_matrix(matrix)
    sink = (len(s1) + 1) * (half + 1) - 1 

    edges = _make_edges(s1, s2[:half], mat, sigma)
    dag = DAG(sink, edges)
    middle_nodes = [(k,v) for k,v in dag.items() if (k+1) % (half+1) == 0]
    print(util.pp(middle_nodes, join_char='\n'))
 
    #print('\n'.join(['%d: %r' %(k,v) for k,v in dag.items()]))
    lg_dict = longest_paths(0, sink, dag)
    #m_nodes = [k for k, v in middle_nodes]
    nodes = [(k,lg_dict[k]) for k,v in middle_nodes]
    print(nodes)
    print('---')

    r_s1, r_s2 = s1[::-1], s2[::-1]
    edges = _make_edges(r_s1, r_s2[:half], mat, sigma)
    dag = DAG(sink, edges)
    r_middle_nodes = [(k,v) for k,v in dag.items() if (k+1) % (half+1) == 0]
    r_lg_dict = longest_paths(0, sink, dag)
    rm_nodes = [k for k, v in r_middle_nodes]
    sm = rm_nodes[0] + rm_nodes[-1]
    r_nodes = [(sm-k, r_lg_dict[k]) for k, v in middle_nodes[::-1]]
    #r_nodes = [sm-n for n in r_nodes]

    print(util.pp(middle_nodes, join_char='\n'))
    print(r_nodes)


    return #(length, path)
Example #3
    def _set_weight(self, feature, weight):
        logger.debug("Setting feature: %s from weight: %s to weight: %s",
                     util.pp(feature),
                     util.pp(self._get_weight(feature)),
                     util.pp(weight))
        assert not np.isnan(weight.mean)
        assert weight.variance >= 0.0
        self._weights[util.serialize_feature(feature)] = weight
Example #4
    def existing_users(self):
        r = self.get_request('account/users', {'namespaces':[self.namespace]})
        status = r['status']
        if status != 'success':
            pp(r)

            raise TemporaryError("Failed to retrieve user list; status %s" % status)
        raw_user_list = r['users'][self.namespace]
        uids = [user.split('@')[0] for user in raw_user_list]
        return uids
Example #5
    def test_pp(self):
        # TODO: Jared to look at exactly what is expected from pp

        self.assertEqual(util.pp("a"),"a")
        a_dict = {'m': 'M', 'a': 'A', 'r': 'R', 'k': 'K'}
        expected_str = "{a: A, k: K, m: M, r: R}"
        self.assertEqual(util.pp(a_dict),expected_str)

        # This behaviour is odd: pretty print doesn't always return a string
        # object
        a_tuple_list = ("my","tuple")
        self.assertEqual(util.pp(a_tuple_list),str(a_tuple_list))
Example #6
    def test_pp(self):
        # TODO: Jared to look at exactly what is expected from pp

        self.assertEqual(util.pp("a"), "a")
        a_dict = {"m": "M", "a": "A", "r": "R", "k": "K"}
        expected_str = "{a: A, k: K, m: M, r: R}"
        self.assertEqual(util.pp(a_dict), expected_str)

        # This behaviour is odd: pretty print doesn't always return a string
        # object
        a_tuple_list = ("my", "tuple")
        self.assertEqual(util.pp(a_tuple_list), str(a_tuple_list))
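The two tests above pin down what util.pp is observed to do: plain strings come back unchanged, dicts are rendered with their keys sorted alphabetically as "{a: A, k: K, m: M, r: R}", and other objects (the tuple) fall back to str(). A minimal sketch consistent with those expectations, and with the join_char keyword used in some of the other examples, might look like the following; it is an illustration only, not the project's actual implementation:

def pp(obj, join_char=', '):
    # Sketch only: strings pass through, dicts render with sorted keys,
    # lists join their items with join_char, everything else becomes str(obj).
    if isinstance(obj, str):
        return obj
    if isinstance(obj, dict):
        body = join_char.join('%s: %s' % (k, obj[k]) for k in sorted(obj))
        return '{%s}' % body
    if isinstance(obj, list):
        return join_char.join(repr(item) for item in obj)
    return str(obj)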
Example #7
    def _apply_dynamics(self, weight):
        prior = util.prior_weight()
        adjusted_variance = weight.variance * prior.variance / \
            ((1.0 - self._config.epsilon) * prior.variance +
             self._config.epsilon * weight.variance)
        adjusted_mean = adjusted_variance * (
            (1.0 - self._config.epsilon) * weight.mean / weight.variance +
            self._config.epsilon * prior.mean / prior.variance)

        adjusted = pb.Gaussian(mean=adjusted_mean, variance=adjusted_variance)
        logger.debug("Adjusting weight %s to %s", util.pp(weight),
                     util.pp(adjusted))
        return adjusted
Example #8
    def _apply_dynamics(self, weight):
        prior = util.prior_weight()
        adjusted_variance = weight.variance * prior.variance / \
            ((1.0 - self._config.epsilon) * prior.variance +
             self._config.epsilon * weight.variance)
        adjusted_mean = adjusted_variance * (
            (1.0 - self._config.epsilon) * weight.mean / weight.variance +
            self._config.epsilon * prior.mean / prior.variance)

        adjusted = pb.Gaussian(mean=adjusted_mean, variance=adjusted_variance)
        logger.debug("Adjusting weight %s to %s",
                     util.pp(weight), util.pp(adjusted))
        return adjusted
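The arithmetic in _apply_dynamics is easier to read in precision (inverse-variance) form: the adjusted weight is a precision-weighted blend of the current weight (weight 1 - epsilon) and the prior (weight epsilon). The sketch below is an equivalent standalone reformulation, using a plain namedtuple in place of pb.Gaussian:

from collections import namedtuple

Gaussian = namedtuple('Gaussian', ['mean', 'variance'])

def apply_dynamics_precision_form(weight, prior, epsilon):
    # Same update as above, rewritten in precision space:
    #   1/var_adj = (1 - eps)/var_w + eps/var_p
    #   mean_adj  = var_adj * ((1 - eps)*mean_w/var_w + eps*mean_p/var_p)
    adjusted_precision = (1.0 - epsilon) / weight.variance + epsilon / prior.variance
    adjusted_variance = 1.0 / adjusted_precision
    adjusted_mean = adjusted_variance * (
        (1.0 - epsilon) * weight.mean / weight.variance +
        epsilon * prior.mean / prior.variance)
    return Gaussian(mean=adjusted_mean, variance=adjusted_variance)

# For example, blend a weight N(0.5, 2.0) with the prior N(0.0, 1.0) at epsilon = 0.1:
print(apply_dynamics_precision_form(Gaussian(0.5, 2.0), Gaussian(0.0, 1.0), 0.1))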
Example #9
def test_locators(config_path):
    with open(config_path, "r") as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)

    for spec in config:
        tree = ps_tree()
        print("Testing locator:")
        pp(spec["window_locator"])
        try:
            result = next(ps_tree_query(tree, **spec["window_locator"]))
            if result["children"]:
                result["children"] = "[...]"
        except StopIteration:
            result = None
        pp(result)
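test_locators expects config_path to parse into a list of specs, each with a "window_locator" mapping whose keys are forwarded to ps_tree_query as keyword arguments. The actual locator keys depend on ps_tree_query, so the sketch below only illustrates the shape, written as the Python structure yaml.load would produce:

# Illustrative only: "name" and "pid" are hypothetical locator keys; the real
# ones are whatever keyword arguments ps_tree_query accepts.
example_config = [
    {"window_locator": {"name": "firefox"}},
    {"window_locator": {"pid": 1234}},
]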
Example #10
    def get_request(self, method, params=None, raw_resp=False,
                    max_attempts=10, log=False, debug=False):

        all_params = [('api_key', self.api_key),
                      ('api_secret', self.api_secret)]

        if params:
            for k, v in params.items():
                if v is None:
                    continue
                if isinstance(v, list):
                    v = ','.join(v)
                all_params.append((k, v))

        components = [self.rest_url,
                      method,
                      '.',
                      self.format,
                      '?',
                      self.fake_encode(all_params)]

        request_url = ''.join(components)

        if debug:
            print "request_url: %s" % request_url

        result = fetch(request_url)

        if result['status'] == 200:
            if debug:
                print "Success"
            resp_data = result['data']
            r = simplejson.loads(resp_data)
            status = r['status']
            if status != 'success':
                pp(r)

                raise TemporaryError("Response received with status %s" % status)
            if raw_resp:
                return r, resp_data
            else: 
                return r
        else:      
            raise TemporaryError("Received HTTP status %s" % result['status'])
Example #11
def cmd(action, wait=500, args=None):
    global uart
    if uart is None: open_uart()
    if action not in commands:
        from util import pp
        print('Invalid command.')
        pp(commands)
        del pp
        return
    command = commands[action]
    #print("Executing %s [%s]" % ( command, action ) )
    # flush input
    while uart.any():
        uart.read(1)
    uart.write('AT+%s%s\n' % (command, '=%s' % args if args else ''))
    sleep_ms(wait)
    uart.readline()
    buf = uart.readline()

    if not buf: return None
    return to_string(buf)
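A call is then just cmd(name, args=...), where name must be a key of the commands table (which is not shown here); anything else prints the table and returns. The names below are purely hypothetical, for illustration only:

# Hypothetical usage sketch: 'version' and 'baud' are made-up keys; the real
# keys live in the commands dict this module defines elsewhere.
print(cmd('version'))            # sends "AT+<command>\n" and returns the reply
print(cmd('baud', args='9600'))  # sends "AT+<command>=9600\n"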
Example #12
def ppr(s):
	"""pretty result of executing the soundcloud API request s"""
	pp(client.r(s))
Example #13
def pstree():
    pp(ps_tree())
Example #14
# recreate file
inp = 'y'  #input('Recreate data file? (y/n)\n')
if inp.lower() == 'y':
	os.remove('data.csv')
	with open('data.csv', 'w') as data_file:
		csv_writer = csv.writer(data_file, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL, lineterminator = '\n')
		tmp = []
		for s in sites:
			tmp.append(s)
		csv_writer.writerow(['time'] + tmp)

# check sites
while True:
	if int(round(time.time() * 1000)) % 600000 < 1000:
		util.pp('INFO', 'testing sites...  ')

		tmp = []

		for site in sites:
			status_code = 0
			text = None
			try:
				req = requests.get(sites[site], timeout=10)
				status_code = req.status_code
				tmp.append(status_code)
				if status_code != 200:
					text = str(req.text)
			except Exception as e:
				status_code = -1
				tmp.append(status_code)
Example #15
util.defaultModel(training_data_Standard, training_data_Standard, "Perceptron")
util.defaultModel(training_data_normal, testing_data_normal, "Perceptron")

training_data_pca, testing_data_pca = util.featureSelection(
    training_data_Standard, testing_data_Standard, "PCA")
training_data_kb, testing_data_kb = util.featureSelection(
    training_data_Standard, testing_data_Standard, "KB")

# Compare PCA and k-best feature selection for dimensionality reduction
util.defaultModel(training_data_pca, testing_data_pca, "NaiveBayes")
util.defaultModel(training_data_kb, testing_data_kb, "NaiveBayes")

# Compare the default perceptron
util.defaultModel(training_data_pca, testing_data_pca, "Perceptron")
# Shuffle the training set 50 times and keep the best perceptron performance
util.pp(training_data_pca, testing_data_pca)

# Compare the default NaiveBayes (baseline)
util.defaultModel(training_data_pca, testing_data_pca, "NaiveBayes")

# Compare the default MLF
util.defaultModel(training_data_pca, testing_data_pca, "MLF")

# Get the best C for each SVM kernel from the validation score, then report
# the test score and confusion matrix
best_rbf_c = sd.detailedSVM(training_data_pca, testing_data_pca, 'rbf')
sd.CCsvm(training_data_pca, testing_data_pca, 'rbf', best_rbf_c)

best_poly_c = sd.detailedSVM(training_data_pca, testing_data_pca, 'poly')
sd.CCsvm(training_data_pca, testing_data_pca, 'poly', best_poly_c)

best_linear_c = sd.detailedSVM(training_data_pca, testing_data_pca, 'linear')
Example #16
import sys
from datetime import datetime
from collections import defaultdict
import cProfile, pstats, io

import bio5  # project module providing middle_edge (assumed import path)
import util  # project module providing pp (assumed import path)

f_name = 'data/test.txt' if len(sys.argv) == 1 else sys.argv[1]
with open(f_name, 'r') as f:
  
    #k = int(f.readline().strip())
    s1 = f.readline().strip()
    s2 = f.readline().strip()
    #perm = [int(k) for k in f.readline().split(' ')]
    res = bio5.middle_edge(s1, s2)
    #print(res)
    print(util.pp(res, join_char='\n'))

    #print(datetime.now()-now)

    #src = int(f.readline().strip())
    #sink = int(f.readline().strip())
    #edges = [bio5.make_edge(l.strip()) for l in f.readlines()]
    #dag = bio5.DAG(sink, edges)
    #res = bio5.longest_path(src, sink, dag)
    #res = bio5.longest_path_fmt(res)
    #print(res)
    #print(util.pp(res, join_char='\n'))

Example #17
import sys

# loads config file
from config import auth


# login or report error
try:
    client = SoundcloudClient(auth)
except Abort as a:
    print(a.msg())
    sys.exit(1)

def ppr(s):
	"""pretty result of executing the soundcloud API request s"""
	pp(client.r(s))

group = client.my_groups()[0]

pp(select(group, ["name","id"]))

t = "artwork_url,bpm,comment_count,genre,id,playback_count,tag_list,title"
t = t.split(',')

for track in client.pending(group):
    try:
        pp(select(track,t))
        client.download(track, "pending")
    except Abort as a:
        print(a.msg())
        print(a.cause())
        print("skipping track %s" % track['title'])