예제 #1
0
def init_visualizer():
    """Create a 500x500 Visualizer and a shuffled list of the ints 0..99.

    Returns:
        tuple: (shuffled list, Visualizer instance).
    """
    window = visualizer.Visualizer(500, 500)
    data = list(range(100))
    random.shuffle(data)
    return data, window
예제 #2
0
 def __init__(self, dirname, graph_name):
     """Build the GTK window that renders a generated disk-data graph.

     :param dirname: directory where the SVG graph is (or will be) stored
     :param graph_name: base name of the graph file, without extension
     """
     # Fixed window geometry in pixels.
     self.HEIGHT = 600
     self.WIDTH = 800
     self.dirname = dirname
     self.graph_name = graph_name
     # Init window
     Gtk.Window.__init__(self, title="Disk data visualization")
     self.set_default_size(self.WIDTH, self.HEIGHT)
     # Header Bar with close button and title.
     self.hb = Gtk.HeaderBar()
     self.hb.set_show_close_button(True)
     self.hb.props.title = "Disk data visualization"
     self.set_titlebar(self.hb)
     # Scrollable container that will hold the web view.
     self.scroll = Gtk.ScrolledWindow(None, None)
     # Webview used to display the SVG graph, slightly smaller than the window.
     self.webview = WebKit.WebView()
     self.webview.set_size_request(self.WIDTH - 50, self.HEIGHT - 80)
     # "Help" button, handled by self.on_button_clicked.
     self.button = Gtk.Button(label="Help")
     self.button.connect("clicked", self.on_button_clicked)
     # Add the stuff together
     self.hb.pack_end(self.button)
     self.scroll.add(self.webview)
     self.add(self.scroll)
     # Generate the graph, then point the webview at the resulting SVG file.
     self.vis = visualizer.Visualizer()
     self.vis.create_graph(self.dirname, self.graph_name) 
     self.webview.open("file://localhost" + path.abspath(self.dirname) + "/" + self.graph_name+ ".svg")
예제 #3
0
 def __init__(self,
              movers,
              domainSize,
              noise,
              peakThreshold=1e-2,
              resolution=9,
              singlePoint=True,
              display=False):
     """Store tracking configuration and precompute normalized decay weights.

     When *display* is true, the decay-weight curve is plotted before
     returning.
     """
     self.viz = visualizer.Visualizer()
     self.movers = movers
     self.domainSize = domainSize
     self.singlePoint = singlePoint
     self.peakThreshold = peakThreshold
     self.resolution = resolution
     self.planeQueue = []
     # Linearly increasing weights, scaled so that they sum to one.
     raw = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
     self.decayWeights = raw / np.sum(raw)
     self.timeStep = 0
     self.noise = noise
     print("Decay weight sum: ", np.sum(self.decayWeights))
     print("Decay weights: ", self.decayWeights)
     if display:
         plt.plot(range(0, len(self.decayWeights), 1), self.decayWeights,
                  'go')
         plt.title("Decay weights")
         plt.show()
 def __init__(self, frameWidth, frameHeight):
     """Calibrate the camera, estimate the road homography and set up lane finding.

     :param frameWidth: width of incoming video frames, in pixels
     :param frameHeight: height of incoming video frames, in pixels
     """
     self.frameWidth = frameWidth
     self.frameHeight = frameHeight
     # NOTE(review): self.cam and self.homographyOp are not created in this
     # method -- presumably class attributes or set elsewhere; confirm.
     # Calibration uses a 9x6 chessboard pattern from camera_cal/.
     self.cam.init(9, 6, 'camera_cal/calibration*.jpg')
     self.cam.calibrate()
     self.homographyOp.setFrameSize(frameWidth, frameHeight)
     self.homographyOp.estimateRoadHomography()
     self.laneLinesFinder = lf.LaneLinesFinder(frameWidth, frameHeight)
     self.visualizer = visu.Visualizer(self.laneLinesFinder, self)
예제 #5
0
    def __init__(self, filename):
        """Load the XML configuration, connect to the game server and register.

        Reads the ``login`` and ``url`` elements from *filename*, prepares an
        empty ``data`` list for samples received from the server, creates a
        Visualizer, connects to the XML-RPC server and calls ``add_player``
        with the configured login.

        :param filename: path to the XML configuration file
        """
        # ET.parse opens and closes the file itself -- the original leaked an
        # open file handle and also constructed the Visualizer twice.
        root = ET.parse(filename).getroot()
        self.login = root.find("login").text
        self.data = []  # samples downloaded from the game server
        self.visualizer = visualizer.Visualizer()
        self.gameserver = xmlrpc.client.ServerProxy(root.find("url").text)
        self.gameserver.add_player(self.login)
예제 #6
0
    def __init__(self, planes, weights):
        """Store reference planes and weights; precompute the gaussian meshgrid.

        :param planes: sequence of 2-D grids, stacked into shape (n, x, y)
        :param weights: per-plane weights, converted to an ndarray
        """
        self.referencePlanes = np.array(planes)
        print("Plane shape: ", self.referencePlanes.shape)
        self.weights = np.array(weights)
        self.viz = visualizer.Visualizer()
        self.baseResolution = 9

        # Meshgrid used for gaussian calculations.  The original stored the
        # 1-D coordinate ranges in self.X/self.Y only to overwrite them on
        # the next line; build the meshgrid directly instead.
        _, xlength, ylength = self.referencePlanes.shape
        self.X, self.Y = np.meshgrid(np.arange(xlength), np.arange(ylength))
예제 #7
0
 def __init__(self, filename):
     """Load configuration from XML and register with the XML-RPC game server.

     :param filename: path to the XML configuration file
     """
     # TODO 1 - load data from the configuration XML file (name given by
     # the *filename* parameter)
     # TODO 2 - create instance variables (login, data, visualizer,
     # gameserver)
     # login - filled with the value read from the XML
     # data - empty list where data from the server will be stored
     # visualizer - instance of the visualizer.Visualizer class
     # gameserver - connect to the XML-RPC server (url in the XML file)
     # TODO 3 - call the add_player method on the server with the login
     # parameter (stored in the login instance variable)
     tree = ET.parse(filename)
     # ElementTree.find delegates to the document root element.
     self.login = tree.find("login").text
     self.data = []
     self.visualizer = visualizer.Visualizer()
     # xmlrpclib is Python 2; the Python 3 equivalent is xmlrpc.client.
     self.gameserver = xmlrpclib.ServerProxy(tree.find("url").text)
     self.gameserver.add_player(self.login)
 def __init__(self, model, *args, **kwargs):
     """Register this agent and set up episode/step bookkeeping.

     :param model: model forwarded to the base-class initializer
     :raises AssertionError: if the EPISODES environment variable is unset
     """
     super(Agent, self).__init__(model=model, *args, **kwargs)
     # Sequential id: this agent's position in the class-level registry.
     self.id = len(Agent.agents)
     self.observation_space_dim = None
     self.action_space_dim = None
     self.episode = np.int16(-1)
     self.nb_steps = np.int16(10**4)
     # Episode budget comes from the environment; fail fast when unset.
     assert os.getenv("EPISODES"), "Env variable 'EPISODES' is not set"
     self.nb_episodes = int(os.getenv("EPISODES"))
     self.episode_step = 0
     self.step = 0
     self.last_observation = None
     self.last_action = None
     self.callbacks = None
     self.history = rl.core.History()
     Agent.agents.append(self)
     # NOTE(review): self.visualize and self.rlcc_nodes_num are not set in
     # this method -- presumably provided by the base class; confirm.
     if self.visualize:
         self.visualizer = visualizer.Visualizer(self.rlcc_nodes_num)
예제 #9
0
def run_app(img1_path,
            img2_path,
            harris_thr,
            descriptor,
            patch_size,
            matching_threshold,
            ransac_sample_size,
            ransac_n_iterations,
            ransac_tolerance,
            ransac_inlier_threshold,
            visualize=True,
            experiment_id=None,
            case_id=None):
    """Match keypoints between two images and stitch them with RANSAC.

    Pipeline: Harris corner detection -> descriptor extraction
    (``descriptor`` selects 'opencv_sift', 'custom_gray_intensities' or
    'custom_rgb_intensities') -> correlation-based matching -> RANSAC affine
    estimation -> warp and stitch.  When *experiment_id* is given, metrics
    are appended to ./Results/Exp-<id>/result.txt.

    :raises ValueError: if *descriptor* names an unknown descriptor type.
    """
    # Prepare the results directory for the experiment
    save_results = False
    results_dir = None
    if experiment_id:
        save_results = True
        results_dir = "./Results/Exp-" + experiment_id + "/"
        if not os.path.exists(results_dir):
            os.makedirs(results_dir)
        print("Experiment: {} | Case: {}".format(experiment_id, case_id))

    # Read and prepare the images
    img1 = cv2.imread(img1_path)
    img1_rgb = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img1_gray = cv2.cvtColor(img1_rgb, cv2.COLOR_RGB2GRAY)

    img2 = cv2.imread(img2_path)
    img2_rgb = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    img2_gray = cv2.cvtColor(img2_rgb, cv2.COLOR_RGB2GRAY)

    # Create the visualizer object
    vis = visualizer.Visualizer(img1_rgb, img2_rgb, visualize, save_results,
                                results_dir, case_id)

    # Create sift object (sift descriptor used only if enabled)
    sift = cv2.xfeatures2d.SIFT_create(nfeatures=500)

    # -------------------------------------------------------------------------
    # Perform Harris Corner detection

    img1_kpts, img2_kpts = utils.get_Harris_corners(img1_gray, img2_gray,
                                                    harris_thr)

    vis.set_keypoints(img1_kpts, img2_kpts)
    vis.draw_keypoints()

    # -------------------------------------------------------------------------
    # Extract descriptors

    if descriptor == 'opencv_sift':
        _, img1_descriptors = sift.compute(
            img1_gray, utils.cvt_to_cv2KeyPoints(img1_kpts))
        _, img2_descriptors = sift.compute(
            img2_gray, utils.cvt_to_cv2KeyPoints(img2_kpts))

    elif descriptor == 'custom_gray_intensities':
        img1_descriptors = descriptors.gray_intensities(
            img1_gray, img1_kpts, patch_size)
        img2_descriptors = descriptors.gray_intensities(
            img2_gray, img2_kpts, patch_size)

    elif descriptor == 'custom_rgb_intensities':
        img1_descriptors = descriptors.rgb_intensities(img1_rgb, img1_kpts,
                                                       patch_size)
        img2_descriptors = descriptors.rgb_intensities(img2_rgb, img2_kpts,
                                                       patch_size)

    else:
        # The original fell through silently and crashed later with a
        # NameError on img1_descriptors; fail fast with a clear message.
        raise ValueError("Unknown descriptor: {!r}".format(descriptor))

    # Normalize the descriptors
    img1_descriptors = utils.normalize(img1_descriptors)
    img2_descriptors = utils.normalize(img2_descriptors)

    # -------------------------------------------------------------------------
    # Get similarity matrices
    #   - High similarity = Low euc distance, High correlation
    #   - Common shape: [n_img1_kpts, n_img2_kpts]

    euc_distance_matrix = utils.compute_euclidean_distances(
        img1_descriptors, img2_descriptors)
    correlation_matrix = utils.compute_correlation(img1_descriptors,
                                                   img2_descriptors)

    # Matching keypoint pair indices.
    matching_kpt_pair_indices = utils.get_matchings(
        correlation_matrix,
        similarity_type='correlation',
        threshold=matching_threshold)

    # Visualize matchings
    vis.set_matches(matching_kpt_pair_indices)
    vis.draw_matches(title="Matching pairs of keypoints")

    # -------------------------------------------------------------------------
    # Perform RANSAC to obtain the affine matrix
    ransac_estimator = ransac.RANSAC_Estimator(ransac_sample_size,
                                               ransac_n_iterations,
                                               ransac_tolerance,
                                               ransac_inlier_threshold)

    affine_matrix, avg_residual, inlier_indices = ransac_estimator.estimate_affine_matrix(
        img1_kpts, img2_kpts, matching_kpt_pair_indices)

    metrics = {
        'n-inliers': None,
        'n-outliers': None,
        'avg-inlier-residual': None,
        'avg-inlier-euc-dist': None
    }

    metrics['n-inliers'] = inlier_indices.shape[0]
    metrics['n-outliers'] = matching_kpt_pair_indices.shape[
        0] - inlier_indices.shape[0]
    metrics['avg-inlier-residual'] = avg_residual
    metrics['avg-inlier-euc-dist'] = ransac.evaluate_model(
        affine_matrix, img1_kpts, img2_kpts, inlier_indices)

    if experiment_id:
        with open(results_dir + "result.txt", 'a') as result_file:
            result = "{:d},{:d},{:.2f},{:.2f}\n".format(
                metrics['n-inliers'], metrics['n-outliers'],
                metrics['avg-inlier-residual'], metrics['avg-inlier-euc-dist'])
            result_file.write(result)

    # Counts are integers; the original formatted them with {:.3f}.
    print("No. of inliers: {:d}".format(metrics['n-inliers']))
    print("No. of outliers: {:d}".format(metrics['n-outliers']))
    print("Avg. inlier residual (before refitting): {:.3f}".format(
        metrics['avg-inlier-residual']))
    print("Avg. inlier Euclidean distance (after refitting): {:.3f}".format(
        metrics['avg-inlier-euc-dist']))
    print("\n")

    vis.set_inliers(inlier_indices)

    # -------------------------------------------------------------------------
    # Apply Affine transform

    img2_warped = cv2.warpPerspective(
        img2_rgb, affine_matrix,
        (img1_gray.shape[1] + img2_gray.shape[1], img1_gray.shape[0]))
    vis.draw_matches(title="Inliers (blue) and Outliers (red)")
    vis.stitch_and_display(img2_warped, display_all=False)
예제 #10
0
matrix = [[1, 2, 5], [3, 4, 0], [6, 7, 8]]

# Alternative: A* with a city-block heuristic, e.g.
# puzzle_solver.solve(matrix, 'a_star', prioritized=True,
#                     heuristic=Heuristic(distance.cityblock))
path, cost, explored, search_depth = puzzle_solver.solve(matrix, 'dfs')

# Drain the solution queue into a list, echoing each state as we go.
path_list = []
print("PATH:")
while not path.empty():
    state_matrix = path.get().matrix
    print(state_matrix)
    path_list.append(state_matrix)
print()

print("COST:")
print(cost)
print()

print("EXPLORED:")
print([state.matrix for state in explored])
print()

print("No of turns:")
print(len(explored) - 1)
print()

print("SEARCH DEPTH:")
print(search_depth)
print()

# Animate the recorded path.
puzzle_visualizer = visualizer.Visualizer(path_list, 140, -200, 240, 'black')
puzzle_visualizer.play()
예제 #11
0
def investigate(layerNames, layerIndexes, conditionOn, save_index=6):
	"""Probe a trained WaveNet generator for causally relevant units.

	Restores the generator and previously collected activation statistics,
	generates audio (seeded from *conditionOn*) until the detected note
	changes, records which units of layer ``layerNames[0]`` at index
	``layerIndexes[0]`` fired above their mean+variance limits, then
	re-generates with those units ablated and writes reference / controlled
	/ uncontrolled wav files under Generated/Comparision/.

	:param layerNames: layer names to probe (only the first is ablated)
	:param layerIndexes: layer indices to probe (only the first is ablated)
	:param conditionOn: path of the audio file used to seed generation
	:param save_index: numeric suffix for the output wav filenames
	"""
	vis = visul.Visualizer(2*16)
	means = {}
	variations = {}
	ablations = {}
	coord = tf.train.Coordinator()
	sess = tf.Session()
	to_restore = {}
	with tf.variable_scope("GEN/"):
		Generator = model.WaveNetModel(1,
			dilations=g.options["dilations"],
			filter_width=g.options["filter_width"],
			residual_channels=g.options["residual_channels"],
			dilation_channels=g.options["dilation_channels"],
			skip_channels=g.options["skip_channels"],
			quantization_channels=g.options["quantization_channels"],
			use_biases=g.options["use_biases"],
			scalar_input=g.options["scalar_input"],
			initial_filter_width=g.options["initial_filter_width"],
			global_condition_cardinality=None,
			histograms=False,
			add_noise=True)
	# Skip the fast-generation buffers; they are not part of the checkpoint.
	variables_to_restore = {
		var.name[:-2]: var for var in tf.global_variables()
		if not (('state_buffer' in var.name or 'pointer' in var.name) and "GEN/" in var.name) }

	saver = tf.train.Saver(variables_to_restore)
	print("Restoring model")
	ckpt = tf.train.get_checkpoint_state(logdir)
	saver.restore(sess, ckpt.model_checkpoint_path)
	print("Model {} restored".format(ckpt.model_checkpoint_path))
	# Placeholders for the seed audio, the ablation control tensors and noise.
	sampleph = tf.placeholder(tf.float32, [1,Generator.receptive_field,1])
	controlph = tf.placeholder(tf.float32, [1, None, g.options["residual_channels"]])
	eph = tf.placeholder(tf.float32, [1, None, g.options["residual_channels"]])
	noiseph = tf.placeholder(tf.float32, [1,1,g.options["noise_dimensions"]])
	encoded = ops.mu_law_encode(sampleph, g.options["quantization_channels"])
	sample = tf.placeholder(tf.float32)
	ablationsholder = {}
	ablationsholder[layerNames[0]] = {}
	ablationsholder[layerNames[0]][layerIndexes[0]] = controlph
	eholder = {}
	eholder[layerNames[0]] = {}
	eholder[layerNames[0]][layerIndexes[0]] = eph

	# Two parallel generation graphs: one ablated/controlled, one untouched.
	one_hot = Generator._one_hot(encoded)
	controlled_sample = Generator._create_ablated_network(one_hot, None, ablationsholder, eholder, noise=noiseph)
	c_arg_maxes = tf.nn.softmax(controlled_sample, axis=2)
	next_sample = Generator._create_network(one_hot, None, noise = noiseph)
	arg_maxes = tf.nn.softmax(next_sample, axis=2)
	decoded = ops.mu_law_decode(sample, g.options["quantization_channels"])
	#print(np.shape(arg_maxes))
	# Sampling with argmax atm
	#intermed = tf.sign(tf.reduce_max(arg_maxes, axis=2, keepdims=True)-arg_maxes)
	#one_hot = (intermed-1)*(-1)
	#fake_sample = tf.concat((tf.slice(encoded, [0,1,0], [-1,-1,-1]), appendph),1)

	# Start audio
	audio, sr = librosa.load(conditionOn, g.options["sample_rate"], mono=True)
	# This should be previously generated part of the experiment
	#start = np.random.randint(0,len(audio)-Generator.receptive_field)
	#fakey = audio[start:start+Generator.receptive_field]
	fakey = audio[-Generator.receptive_field:]
	print(np.shape(fakey))
	noise = np.random.normal(g.options["noise_mean"], g.options["noise_variance"], size=g.options["noise_dimensions"]).reshape(1,1,-1)

	# Per-layer activation tensors plus variables holding their running
	# mean/variance statistics (restored from a previous run below).
	for name in layerNames:
		means[name] = {}
		ablations[name] = {}
		variations[name] = {}
		for i in layerIndexes:
			#ablations[name][i] = get_causal_activations(Generator._get_layer_activation(name, i, one_hot, None, noise=zeros),i)
			ablations[name][i] = Generator._get_layer_activation(name, i, one_hot, None, noise=noiseph)
			abl = tf.reduce_mean(ablations[name][i], axis=[0,1])
			means[name][i] = tf.Variable(tf.zeros(tf.shape(abl)), name="ABL/mean_"+name+str(i))
			to_restore["ABL/mean_"+name+str(i)] = means[name][i]
			variations[name][i] = tf.Variable(tf.zeros(tf.shape(abl)), name="ABL/var_"+name+str(i))
			to_restore["ABL/var_"+name+str(i)] = variations[name][i]


	print("Restoring previous statistics")
	ablatesaver = tf.train.Saver(to_restore)
	ablateckpt = tf.train.get_checkpoint_state(ablatelogs)
	if ablateckpt is not None:
		optimistic_restore(sess, ablateckpt.model_checkpoint_path, tf.get_default_graph())
	print("Statistics restored")

	name = layerNames[0]
	i = layerIndexes[0]

	# A unit counts as "active" when its activation exceeds mean + variance.
	limits = means[name][i] + variations[name][i]
	mask = ablations[name][i] > limits
	mask_ph = tf.placeholder(tf.bool)
	causal_ph = tf.placeholder(tf.float32)
	causal_counter = tf.cast(mask, tf.float32) + causal_ph
	stillactive = tf.logical_and(mask, mask_ph)
	fakey = np.reshape(fakey, [1,-1,1])
	generated = sess.run(encoded, feed_dict={sampleph : fakey})
	fakey = sess.run(one_hot, feed_dict={sampleph : fakey})
	sl = Generator.receptive_field
	length=sl*1+1
	bar = progressbar.ProgressBar(maxval=length, \
		widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
	bar.start()
	prevNote = ""
	counter = 0
	act =  sess.run(ablations[name][i], feed_dict={one_hot : fakey, noiseph : noise})
	print(np.shape(act))
	causal_count =  sess.run(tf.cast(tf.zeros_like(mask), tf.bool), feed_dict={ablations[name][i] : act})
	# Pass 1: free-running generation, accumulating how often each unit was
	# active; stops early once the detected note changes.
	for k in range(length):
		act, prediction = sess.run([ablations[name][i], arg_maxes], feed_dict={one_hot : fakey, noiseph : noise})
		#fakey = sess.run(fake_sample, feed_dict={encoded : fakey, appendph : prediction})
		newest_sample = prediction[-1,-1,:]
		newmask = sess.run(mask, feed_dict={ablations[name][i] : act})
		causal_count = sess.run(causal_counter, feed_dict={causal_ph : causal_count,  mask:newmask})
		#print(sess.run(tf.reduce_sum(causal_count, axis=[0,1])))
		sample = np.random.choice(
			np.arange(g.options["quantization_channels"]), p=newest_sample)
		#sample = np.argmax(newest_sample)
		generated = np.append(generated, np.reshape(sample,[1,1,1]), 1)
		if counter % sl == 0 and counter != 0:
			decoded = sess.run(ops.mu_law_decode(generated[0,-sl:,0], g.options["quantization_channels"]))
			note = vis.detectNote(decoded, g.options["sample_rate"])
			amp = vis.loudness(decoded)
			print("note: %s, amp %0.4f"%(note, amp))
			if prevNote != note: #and amp > 1.:
				#print("note: %s, amp %0.4f"%(note, amp))
				prevNote = note
				break
		counter += 1
		fakey = sess.run(one_hot, feed_dict={encoded : generated[:,-Generator.receptive_field:,:]})
		bar.update(k+1)
	bar.finish()
	save_ctrl=np.reshape(generated,[-1])[-sl:]
	decoded = sess.run(ops.mu_law_decode(save_ctrl, g.options["quantization_channels"]))
	save_ctrl = np.array(decoded)
	librosa.output.write_wav("Generated/Comparision/to_copy_"+str(save_index)+".wav", save_ctrl, sr, norm=True)

	causal_count = causal_count / length
	print(sess.run(tf.reduce_sum(causal_count, axis=[0,1])))
	print(np.shape(act))
	ablat = sess.run(tf.tile(tf.reshape(limits, [1,1,-1]), [1,np.shape(act)[1],1]))
	print("Target == " + note)
	target=note
	target_freq = vis.getFreq(target)
	# Get new bit of audio for the generator
	#start = np.random.randint(0,len(audio)-Generator.receptive_field)
	#fakey = audio[start:start+Generator.receptive_field]
	fakey = audio[-Generator.receptive_field:]
	fakey = np.reshape(fakey, [1,-1,1])
	generated = sess.run(encoded, feed_dict={sampleph : fakey})
	uncontrolled_generated = generated
	fakey = sess.run(one_hot, feed_dict={sampleph : fakey})
	uncontrolled = fakey
	counter = 0
	np.random.seed() # Set seed
	# Pass 2: generate side-by-side with (controlled) and without
	# (uncontrolled) ablation, comparing detected notes against the target.
	for k in range(length):
		c_prediction = sess.run(c_arg_maxes, feed_dict={one_hot : fakey, ablationsholder[name][i] : ablat, eholder[name][i] : causal_count, noiseph : noise})
		prediction = sess.run(arg_maxes, feed_dict={one_hot : uncontrolled, noiseph : noise})

		c_newest_sample = c_prediction[-1,-1,:]
		newest_sample = prediction[-1,-1,:]

		c_sample = np.random.choice(
			np.arange(g.options["quantization_channels"]), p=c_newest_sample)
		sample = np.random.choice(
			np.arange(g.options["quantization_channels"]), p=newest_sample)

		#sample = np.argmax(newest_sample)
		generated = np.append(generated, np.reshape(c_sample,[1,1,1]), 1)
		uncontrolled_generated = np.append(uncontrolled_generated, np.reshape(sample,[1,1,1]), 1)
		fakey = sess.run(one_hot, feed_dict={encoded : generated[:,-Generator.receptive_field:,:]})
		uncontrolled = sess.run(one_hot, feed_dict={encoded : uncontrolled_generated[:,-Generator.receptive_field:,:]})
		if counter % sl == 0 and counter != 0:
			decoded = sess.run(ops.mu_law_decode(generated[0,-sl:,0], g.options["quantization_channels"]))
			note = vis.detectNote(decoded, g.options["sample_rate"])
			note_freq =vis.getFreq(note)
			tamp = vis.loudness(decoded)
			print("note: %s, amp %0.4f, freq error (abs): %0.4f"%(note, tamp, np.abs(target_freq-note_freq)))
		counter += 1

	generated=np.reshape(generated,[-1])
	decoded = sess.run(ops.mu_law_decode(generated, g.options["quantization_channels"]))
	generated = np.array(decoded)
	librosa.output.write_wav("Generated/Comparision/controlled_"+str(save_index)+".wav", generated, sr, norm=True)

	uncontrolled_generated=np.reshape(uncontrolled_generated,[-1])
	u_decoded = sess.run(ops.mu_law_decode(uncontrolled_generated, g.options["quantization_channels"]))
	uncontrolled_generated = np.array(u_decoded)
	librosa.output.write_wav("Generated/Comparision/uncontrolled_"+str(save_index)+".wav", uncontrolled_generated, sr, norm=True)
	sess.close()
예제 #12
0
def show(datafile: str):
    """Load every array stored in *datafile* (an .npz archive) and display them."""
    arrays = list(np.load(datafile).values())
    viewer = vis.Visualizer()
    viewer.show(arrays, len(arrays))
예제 #13
0
def run_viz(nvm_pipe):
    """Create a Visualizer bound to *nvm_pipe* and start it."""
    vz.Visualizer(nvm_pipe).launch()
예제 #14
0
 def consume_results(self):
     """Consume voting results from RabbitMQ, rendering them via a Visualizer."""
     chart = visualizer.Visualizer(self.votings_names, self.chart_type)
     listener = rabbitListener.RabbitListener(self.channel, chart,
                                              self.conf_file)
     listener.start_consuming()
예제 #15
0
from util import *
"""Controls the circulation process between visualizer, sorting algorithms and plotter."""


def _get_args():
    ap = argparse.ArgumentParser(
        description="Sorting algorithms visualization.")
    ap.add_argument('-a',
                    '--algorithm',
                    type=str,
                    help="Choose algorithm",
                    required=True)
    args = ap.parse_args()
    return args.algorithm


algorithm = _get_args()
sorter = Sorter(enable_tracking=True)
# Map CLI algorithm names onto the Sorter's bound methods.
sorting_functions = {
    'Selection': sorter.selection_sort,
    'Insertion': sorter.insertion_sort,
    'Bubble': sorter.bubble_sort,
    'Heap': sorter.heap_sort,
    'Quick': sorter.quick_sort,
    'Merge': sorter.merge_sort
}
func = sorting_functions.get(algorithm)
if func is None:
    # The original called None on an unknown name, crashing with an opaque
    # TypeError; report the valid choices instead.
    raise SystemExit("Unknown algorithm {!r}; choose one of: {}".format(
        algorithm, ", ".join(sorted(sorting_functions))))
# Sort a random case, then replay the recorded steps in the visualizer.
func(get_random_case(50, 10000))
vis = visualizer.Visualizer(sorter.track)
vis.start()
예제 #16
0
 def __init__(self, mergedPlane):
     """Store the merged plane, a Visualizer and the peak-keep threshold."""
     self.viz = visualizer.Visualizer()
     self.mergedPlane = mergedPlane
     # Peaks at or below this value are candidates for removal.
     self.peakThresholdToKeep = 0.015
예제 #17
0
from turbine import Turbine
from turbine_config import TurbineConfig
import os
from os.path import join, isfile, realpath
from numpy import array
from time import time

# Python 2 script: times a base-grid validation of a blade-coverage path.
cfg = TurbineConfig.load('test/turbine_unittest.cfg')
turbine = Turbine(cfg)

import visualizer
vis = visualizer.Visualizer(turbine.env)

import blade_coverage
import db

# Grid cell under test plus the robot/manipulator handles.
grid = 7
robot = turbine.robot
manip = robot.GetActiveManipulator()
# Disable the 'Flame' link -- presumably to exclude it from collision
# checking; confirm against the OpenRAVE setup.
robot.GetLink('Flame').Enable(False)
DB = db.DB(join(os.environ['PYTHON_DATABASE'], 'FACE'), turbine)

# Use blade #3's transform as the database reference frame.
T = turbine.blades[3].GetTransform()

DB.T = T
# Anchor tuple consumed by blade_coverage -- semantics defined there; confirm.
psa = (1.8500000000000032, 0.53725006964517363, 0.82546707480056725)

threshold = 5e-2
t = time()
path = blade_coverage.base_grid_validation(turbine, psa, DB, grid, threshold)
print time() - t
예제 #18
0
import preprocessing as pp
import classifier as cc
import visualizer as vs
from sklearn import datasets
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt

# Load the iris dataset and render a heatmap of its feature matrix.
X = datasets.load_iris()
# df = pd.DataFrame(data=X.data,columns=X.feature_names)
V = vs.Visualizer()
V.heatmap(X.data, X.feature_names)
예제 #19
0
#!/usr/bin/env python
from multiprocessing import Process
import sys
import visualizer

# The instance is not kept -- presumably Visualizer.__init__ starts the UI
# itself; confirm. (Process and sys are imported but unused here.)
visualizer.Visualizer()
예제 #20
0
def main():
    """Train (or evaluate) a Temporal Segment Network on a video dataset.

    Reads the global ``parser`` arguments, builds the TSN model and data
    loaders, optionally resumes from a checkpoint, then either runs a single
    validation pass (--evaluate) or trains for the configured epochs, saving
    a checkpoint after each evaluated epoch.

    :raises ValueError: on an unknown dataset, loss type or modality.
    """
    global args, best_prec1
    args = parser.parse_args()

    if args.dataset == 'ucf101':
        num_class = 101
    elif args.dataset == 'hmdb51':
        num_class = 51
    elif args.dataset == 'kinetics':
        num_class = 400
    else:
        raise ValueError('Unknown dataset ' + args.dataset)

    # Typical configuration: consensus_type=avg, base_model=resnet_101,
    # dropout=0.5.
    model = TSN(num_class,
                args.num_segments,
                args.modality,
                base_model=args.arch,
                consensus_type=args.consensus_type,
                dropout=args.dropout,
                partial_bn=not args.no_partialbn)

    # 224
    crop_size = model.crop_size
    # 256/224
    scale_size = model.scale_size
    # Normalization statistics differ per modality.
    input_mean = model.input_mean
    input_std = model.input_std

    policies = model.get_optim_policies()
    # Training augmentation: GroupMultiScaleCrop (crops at fixed offsets and
    # resizes to 224 with bilinear interpolation) + GroupRandomHorizontalFlip.
    train_augmentation = model.get_augmentation()
    print(args.gpus)
    model = model.cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print(("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            # Bug fix: the original printed args.evaluate here.
            print(("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch'])))
        else:
            print(("=> no checkpoint found at '{}'".format(args.resume)))

    cudnn.benchmark = True

    # Data loading code
    if args.modality != 'RGBDiff':
        normalize = GroupNormalize(input_mean, input_std)
    else:
        normalize = IdentityTransform()

    if args.modality == 'RGB':
        data_length = 1
    elif args.modality in ['Flow', 'RGBDiff']:
        data_length = 5
    else:
        # The original fell through and crashed later with a NameError on
        # data_length; fail fast in the style of the dataset check above.
        raise ValueError('Unknown modality ' + args.modality)

    # roll/div depend on whether the backbone (BNInception) expects BGR
    # input in the 0-255 range rather than normalized RGB.
    train_loader = torch.utils.data.DataLoader(TSNDataSet(
        "",
        args.train_list,
        num_segments=args.num_segments,
        new_length=data_length,
        modality=args.modality,
        image_tmpl="im{}.jpg",
        transform=torchvision.transforms.Compose([
            train_augmentation,
            Stack(roll=args.arch == 'BNInception'),
            ToTorchFormatTensor(div=args.arch != 'BNInception'),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(TSNDataSet(
        "",
        args.val_list,
        num_segments=args.num_segments,
        new_length=data_length,
        modality=args.modality,
        image_tmpl="im{}.jpg",
        random_shift=False,
        transform=torchvision.transforms.Compose([
            GroupScale(int(scale_size)),
            GroupCenterCrop(crop_size),
            Stack(roll=args.arch == 'BNInception'),
            ToTorchFormatTensor(div=args.arch != 'BNInception'),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    if args.loss_type == 'nll':
        criterion = torch.nn.CrossEntropyLoss().cuda()
    else:
        raise ValueError("Unknown loss type")
    # Log the per-group optimization policy.
    for group in policies:
        print(('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(
            group['name'], len(group['params']), group['lr_mult'],
            group['decay_mult'])))
    # Typically lr is 1e-3 here.
    optimizer = torch.optim.SGD(policies,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Evaluation-only mode: run a single validation pass and exit.
    if args.evaluate:
        validate(val_loader, model, criterion, 0)
        return
    viz = vis.Visualizer()
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args.lr_steps)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, viz)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            prec1 = validate(val_loader, model, criterion, epoch, viz=viz)

            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            # Bug fix: the resume path above reads 'state_dict' and expects
            # 'best_prec1' to hold the best value, but the original saved
            # the weights under 'test_crops' and the current epoch's prec1.
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                }, is_best)
예제 #21
0
# This script created actions graph with two actions in this dir called actions.svg
# You can open it in browser to see if the graph generated correctly
### IMPORTANT ###
# Before running this file please create a device file loop0 using dd and losetup
# The disk can be about 30 MB in size
import sys
sys.path.append("./..")

import blivet
import visualizer
from blivet.size import Size

# Scan current storage, then schedule destruction of the loop0 device so the
# resulting action graph has actions to draw (nothing is committed to disk
# here -- only the planned actions are visualized).
b = blivet.Blivet()
b.reset()
loop = b.devicetree.getDeviceByName("loop0")
b.destroyDevice(loop)
v = visualizer.Visualizer(blivet=b, palletePath="../assets/pallete.xml")
v.create_actions_graph(".", "actions")
예제 #22
0
def visualize(evaluation_result):
    """Build a Visualizer for the phrases found during evaluation.

    :param evaluation_result: mapping with keys ``phrases`` (list of phrase
        objects) and ``source_tex_file`` (path of the source .tex file)
    :returns: the constructed ``visualizer.Visualizer``
    :raises ValueError: if no phrases are present
    """
    phrases = evaluation_result.get("phrases")
    if not phrases:
        # The original dereferenced phrases[0] (a debug print) and crashed
        # with a TypeError when the key was missing.
        raise ValueError("evaluation_result contains no 'phrases' to visualize")
    tex = evaluation_result.get("source_tex_file")
    return visualizer.Visualizer(phrases, tex)
예제 #23
0
import preprocessing as pp
import classifier as cc
import visualizer as vs
from sklearn import datasets
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd


# Load the breast-cancer data, drop identifier/label columns, and keep only
# the radius_mean / texture_mean feature columns.  (The original computed a
# 'cols' list twice without ever using it -- dead code, removed.)
df = pd.read_csv('breast_cancer.csv',index_col=32).drop("id",axis=1).drop("diagnosis",axis=1)
for i in df.columns:
    if "radius_mean" not in i and "texture_mean" not in i:
        df = df.drop(i,axis=1)
# Render the surviving columns with four different plot styles.
vs.Visualizer().boxplot(df.values,df.columns)
vs.Visualizer().violinplot(df.values,df.columns)
vs.Visualizer().pairplot(df.values,df.columns)
vs.Visualizer().histogram(df.values,df.columns)
예제 #24
0
    # Build a dense list of event types indexed 0..numEventTypes-1 from
    # eventTypeDict -- assumes its keys are contiguous ints; TODO confirm.
    numEventTypes = len(eventTypeDict)
    eventTypeList = [None] * numEventTypes
    assert (numEventTypes > 0), "No event types detected (Assertion)...\n"
    for i in range(0, numEventTypes):
        eventTypeList[i] = eventTypeDict[i]

    # Initialize event class
    evn = event.Event(sys.argv[1])
    evn.setEventType(eventTypeList)

    # Initialize outlier class
    otl = outlier.Outlier(sys.argv[1])

    # Initialize visualizer class
    maxDepth = int(config['Visualizer']['MaxFunDepth'])
    viz = visualizer.Visualizer(sys.argv[1])

    # Reset visualization server
    viz.sendReset()

    # In nonstreaming mode send function map and event type to the visualization server
    viz.sendEventType(eventTypeList, 0)
    funMap = prs.getFunMap()
    viz.sendFunMap(list(funMap.values()), 0)
else:
    # NOTE(review): the matching 'if' is outside this chunk; judging from the
    # comment above, this is presumably the streaming branch -- confirm.
    # Initialize event object
    evn = event.Event(sys.argv[1])

    # Initialize outlier object
    otl = outlier.Outlier(sys.argv[1])
예제 #25
0
File: server.py — Project: solute/hrt
    def __init__(self, *args):
        """Initialize the HTTP server, attach a Visualizer and mark it running."""
        BaseHTTPServer.HTTPServer.__init__(self, *args)
        self.visualizer = visualizer.Visualizer()
        self.running = True
예제 #26
0
import visualizer

# Render the word 'elijah' in a 700x1000 window.
vis = visualizer.Visualizer(
    win_height=700, win_width=1000, line_width=5, space=1.5)
vis.render('elijah')
예제 #27
0
    def itrgenerate(self, x, l):
        """Sample channel *l* at position *x*: a sinusoid mapped into [0, 1],
        cosine for odd channels, plus self.noise(x)."""
        if l % 2 == 0:
            base = math.sin(x / math.pi)
        else:
            base = math.cos(x / math.pi)
        return (1 + base) * 0.5 + self.noise(x)

    def generate(self, x):
        """Return one sample per channel (self.num of them) at position *x*.

        Uses a list comprehension with range() instead of the original
        index-preallocation loop over Python-2-only xrange (same iteration
        behavior on Python 2, and portable to Python 3).
        """
        return [self.itrgenerate(x, l) for l in range(self.num)]

    def next(self):
        """Advance the internal counter and return the next sample vector."""
        current = self.i
        self.i = current + 1
        return self.generate(current)


if __name__ == '__main__':
    # Stream generated samples into the visualizer, one every half second;
    # any failure is reported and ends the loop.
    try:
        vis = plt.Visualizer()
        for sample in SimpleGenerator():
            print("{}".format(sample))
            vis.append(sample)
            time.sleep(0.5)
    except Exception as e:
        print(str(e))
예제 #28
0
import tkinter as tk
import visualizer

if __name__ == '__main__':
    root = tk.Tk()
    # NOTE(review): hard-coded, machine-specific data directory.
    path = "/Users/Roger/Projects/bike-computer/data/"
    vis = visualizer.Visualizer(root, path)

    # Borderless window (no decorations) rather than true fullscreen.
    #root.attributes('-fullscreen',True)
    root.overrideredirect(True)
    #root.after(1, dbg.update_display)
    root.mainloop()
예제 #29
0
# Preprocess the (externally loaded) diamonds dataset in place.
prepro = Preprocessing()
prepro.handleMissing(dataset)

# Features: everything except the target price and the categorical columns.
x = dataset.drop(['price','cut','color','clarity'],axis = 1)
y = dataset['price']

x = prepro.scale(x)

# Encode the categorical columns separately, then rejoin with the numerics.
encode_col = dataset[['cut','color','clarity']]
encode_col  = prepro.encode(encode_col)

x = np.concatenate((x,encode_col),axis=1)

X_train, X_test, y_train, y_test = train_test_split(x, y,random_state=0,test_size=0.33)

# Quick look at the first feature against the target on the test split.
vis.Visualizer().scatterplot(X_test[:,0],y_test.iloc[:])

# Linear Regression
regressor = reg.Regressor(type=reg.LINEAR_REGRESSION)
regressor.fit(X_train, y_train)
print("******************Linear Regression******************")
print(regressor.score(X_test,y_test))
#vis.Visualizer().scatterplot(X_test[:,0],y_test.iloc[:],regressor)
print("*************************************************")



# polynomial Regression
params = dict(degree = 5)
regressor = reg.Regressor(type=reg.POLY_REGRESSION, **params)
regressor.fit(X_train, y_train)