def test_photoreceptor(
        print_axon=True,
        print_postsynaptic=True,
        print_photoreceptor=True,
        print_dendrite=False,
        print_synaptic_cleft=False,
        spike_strengths=[-100]):

    data = []
    length = 60
    period = 100
    rest_length = 50

    for strength in spike_strengths:
        neuron_factory = NeuronFactory()
        photoreceptor = neuron_factory.create_neuron(
            neuron_type=NeuronTypes.PHOTORECEPTOR,
            record = True)
        post = neuron_factory.create_neuron(record = True)

        synapse = neuron_factory.create_synapse(photoreceptor, post, strength=10)

        driver_name = "Light activation %f" % strength
        neuron_factory.register_driver(photoreceptor,
            PulseDriver(current=strength, period=period,
                length=length, delay=rest_length, record=True),
            name = driver_name)
        neuron_factory.step(args.iterations)

        if print_photoreceptor: data.append(("Photoreceptor", photoreceptor.get_record()))
        if print_postsynaptic: data.append(("Post", post.get_record()))
        data.append(neuron_factory.get_driver_data(driver_name))
    if not args.silent:
        plot(data, title="Photoreceptor test")
def transmit(strength=0.25, delays=[None, 100]):
    data = []
    neuron_factory = NeuronFactory()
    dendrite_strength = 25
    if args.silent:
        pre_neuron = neuron_factory.create_neuron()
        for delay in delays:
            post_neuron = neuron_factory.create_neuron()
            synapse = neuron_factory.create_synapse(pre_neuron, post_neuron,
                axon_delay=delay, dendrite_strength=dendrite_strength)
    else:
        pre_neuron_name = "pre strength: %f" % strength
        pre_neuron = neuron_factory.create_neuron(probe_name=pre_neuron_name)

        post_neuron_names = []
        for delay in delays:
            name="post delay: %s" % str(delay)
            post_neuron_names.append(name)
            post_neuron = neuron_factory.create_neuron(probe_name=name)
            synapse = neuron_factory.create_synapse(pre_neuron, post_neuron,
                axon_delay=delay, dendrite_strength=dendrite_strength)

    neuron_factory.register_driver(pre_neuron,
        ActivationPulseDriver(activation=strength, period=100, length=1, delay=25))#, decrement=0.01))
    neuron_factory.step(args.iterations)

    #print("Saved %d out of %d cycles." % (neuron_factory.stable_count, neuron_factory.time))

    if not args.silent:
        data.append(neuron_factory.get_probe_data(pre_neuron_name))
        for name in post_neuron_names:
            data.append(neuron_factory.get_probe_data(name))
        plot(data, title="Synaptic transmission")
def start():
    seasonal_fetch('tt0944947', start=1, end=8)
    x_seasons = []
    seasonal_mean_ratings = []

    mean_rating = 0.00
    for i in DATA:
        x_seasons.append(i)
        ep_count = 0
        total_rating = 0.00
        x_episodes = []
        episode_ratings = []
        for j in DATA[i]:
            # plot episode-wise
            x_episodes.append(j['name'])
            episode_ratings.append(j['rating'])

            # process data for season wise
            total_rating += float(j['rating'])
            ep_count += 1
        mean_rating = total_rating / ep_count
        seasonal_mean_ratings.append(round(mean_rating, 2))

        # plot episode-wise data
        plot(p_type='stem', x_axis=x_episodes, points=[float(x) for x in episode_ratings],
             title=f'GOT - {i.lower()} IMDB episode-wise rating',
             x_label='Episode name', y_label='Rating per episode', y_min_lim=3, y_max_lim=10)

    # plot season-wise
    plot(p_type='plot', x_axis=x_seasons, points=[float(x) for x in seasonal_mean_ratings],
         title='GOT - IMDB season-wise ratings (mean)',
         x_label='Season', y_label='Rating per season (mean)', y_min_lim=3, y_max_lim=10)
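
seasonal_fetch and DATA are defined elsewhere; judging from how start() indexes them, DATA presumably maps a season label to a list of episode dicts, roughly like this (placeholder values only):

DATA = {
    'Season 1': [
        {'name': 'Episode 1', 'rating': '9.1'},
        {'name': 'Episode 2', 'rating': '8.8'},
    ],
    # ... one list of episode dicts per season, as filled in by seasonal_fetch()
}
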
Example #4
def test1():

    data = []
    sample = []
    num = 25  #number of elements
    # Feature set containing (x,y) values of 25 known/training data
    #Create arrays of 25x1 of the range (0, 99)
    x = np.random.randint(0, 100, (num, 1)).astype(np.float32)
    y = np.random.randint(0, 100, (num, 1)).astype(np.float32)

    for i in range(0, num):
        sample.append([x[i][0], y[i][0]])

    arraynp = np.asarray(sample)

    # Labels each one either Red or Blue with numbers 0 and 1
    # Create a 25x1 array of labels in the range [0, 2)
    responses = np.random.randint(0, 2, (num, 1)).astype(np.float32)
    """Example for doing something awesome
    plt.scatter(blue[:,0],blue[:,1],80,'b','s')
    """

    #print sample
    #print arraynp
    #print len(arraynp)

    data.append(arraynp[responses.ravel() == 1])
    data.append(arraynp[responses.ravel() == 0])

    p.plot(data)
    def state_callback(self, t, dt, linear_velocity, yaw_velocity):
        '''
        called when a new odometry measurement arrives approx. 200Hz
    
        :param t - simulation time
        :param dt - time difference since the last invocation
        :param linear_velocity - x and y velocity in local quadrotor coordinate frame (independent of roll and pitch)
        :param yaw_velocity - velocity around quadrotor z axis (independent of roll and pitch)

        :return tuple containing linear x and y velocity control commands in local quadrotor coordinate frame (independent of roll and pitch), and yaw velocity
        '''
        self.x = self.predictState(dt, self.x, linear_velocity, yaw_velocity)
        
        F = self.calculatePredictStateJacobian(dt, self.x, linear_velocity, yaw_velocity)
        self.sigma = self.predictCovariance(self.sigma, F, self.Q)
        position = self.x[0:2]
        markers = self.get_markers()
        self.target_marker_index = self.update_target_marker_index(markers, self.target_marker_index, position)
        
        desired_velocity = self.compute_desired_velocity(markers, self.target_marker_index, position, linear_velocity)
        u = self.compute_control_command(t, dt, position, linear_velocity, np.array([markers[self.target_marker_index]]).T, desired_velocity)
        u_yaw = self.compute_yaw_control_command(t, dt, self.x[2], yaw_velocity, 0, 0)
        plot("x command", u[0])
        self.visualizeState()
        return u, u_yaw
def spatial_summation(num_pre = 3):
    data = []

    neuron_factory = NeuronFactory()
    post_neuron = neuron_factory.create_neuron(record=True)

    pre_neurons = []
    for i in xrange(num_pre):

        pre_neuron = neuron_factory.create_neuron(
            neuron_type=NeuronTypes.GANGLION,
            record = True)
        pre_neurons.append(pre_neuron)

        synapse = neuron_factory.create_synapse(pre_neuron, post_neuron,
            dendrite_strength=10, axon_delay=5)

        neuron_factory.register_driver(pre_neuron,
            PulseDriver(current=100, period=100*(i+1),
                length=1, delay=10 + (2*i), record=True))

    neuron_factory.step(args.iterations)

    #print("Saved %d out of %d cycles." % (neuron_factory.stable_count, neuron_factory.time))

    for pre_neuron in pre_neurons:
        data.append(("Pre", pre_neuron.get_record()))
    data.append(("Post", post_neuron.get_record()))
    if not args.silent:
        plot(data, title="Spatial summation test")
def test_gap_junction(strength1 = 0.0015, strength2 = 0.0):
    neuron_factory = NeuronFactory()
    neuron1_name = "G Neuron %f" % strength1
    neuron1 = neuron_factory.create_neuron(probe_name=neuron1_name)

    neuron2_name = "G Neuron %f" % strength2
    neuron2 = neuron_factory.create_neuron(probe_name=neuron2_name)

    neuron3_name = "Control Neuron %f" % strength1
    neuron3 = neuron_factory.create_neuron(probe_name=neuron3_name)

    rest_time = 1000
    driver1 = ConstantDriver(strength1, delay=rest_time)
    driver2 = ConstantDriver(strength2, delay=rest_time)

    neuron_factory.register_driver(neuron1, driver1)
    neuron_factory.register_driver(neuron2, driver2)
    neuron_factory.register_driver(neuron3, driver1)

    neuron_factory.create_gap_junction(neuron1, neuron2, 1.0)

    neuron_factory.step(rest_time+args.iterations)

    data = [neuron_factory.get_probe_data(neuron1_name),
        neuron_factory.get_probe_data(neuron2_name),
        neuron_factory.get_probe_data(neuron3_name)]
    if not args.silent:
        plot(data, title="Gap junction test")
Example #8
def main ( ):
    graph = { 1: [ 2, 4, 8 ], 2: [ 7, 3, 1 ], 3: [ 2, 6, 4 ],
              4: [ 1, 3, 5 ], 5: [ 4, 8, 6 ], 6: [ 5, 3, 7 ],
              7: [ 6, 2, 8 ], 8: [ 7, 5, 1 ] }
    certificate = [ 1, 2, 3, 4, 5, 6, 7, 7, 1 ]
    verify_hamiltonian ( graph, certificate )
    plt.plot ( )
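
verify_hamiltonian is not shown above; a minimal sketch of what such a check could look like, assuming the certificate is meant to be a closed walk over the adjacency-list graph that visits every vertex exactly once:

def verify_hamiltonian(graph, certificate):
    # hypothetical checker: the walk must close on itself, follow only existing
    # edges, and visit every vertex of the graph exactly once
    if certificate[0] != certificate[-1]:
        return False
    for a, b in zip(certificate, certificate[1:]):
        if b not in graph[a]:
            return False
    interior = certificate[:-1]
    return len(set(interior)) == len(interior) == len(graph)
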
Example #9
def clustering(args):
    ''' run clustering on a single k'''
    print "Feature Analysis/Clustering Mode: single k"

    feature_holder = featurevector.feature_holder(filename=FEATURE_VECTOR_FILENAME)
    sones_holder = featurevector.feature_holder(filename=SONE_VECTOR_FILENAME)
    k = args.k

    print feature_holder
    mfccs = feature_holder.get_feature('mfcc')

    print sones_holder
    sones = sones_holder.get_feature('sones')

    centroids, distortion = Get_Best_Centroids(k, 1)
    print "Distortion for this run: %0.3f" % (distortion)

    classes,dist = kmeans.scipy_vq(mfccs, centroids)

    # Get the inter class dist matrix
    inter_class_dist_matrix = mir_utils.GetSquareDistanceMatrix(centroids)

    eventBeginnings = feature_holder.get_event_start_indecies()
    # write audio if given -w
    if args.plot_segments:
        PlotWaveformWClasses(k, feature_holder,classes)
    if args.write_audio_results:
        WriteAudioFromClasses(k, feature_holder, classes)

    plot.plot(mfccs, sones, eventBeginnings, centroids, inter_class_dist_matrix, classes)
Example #10
def plot_hole_markers(course_id, round_id, hole):
    query = """select lens, fov ,camera_x , camera_y , camera_z , target_x , target_y , target_z , roll from camera
				where course_id = '%s' and hole = '%s' and camera_type = 'hole'""" % (
        course_id, hole)

    cursor.execute(query)
    hole_info = cursor.fetchone()

    query = """select tee_x, tee_y, tee_z, pin_x, pin_y, pin_z from hole_camera
				where course_id = '%s' and hole = '%s' and round_id = '%s'""" % (
        course_id, hole, round_id)

    cursor.execute(query)
    rows = cursor.fetchone()

    tee = plotter.Vector(rows["tee_x"], rows["tee_y"], rows["tee_z"])
    pin = plotter.Vector(rows["pin_x"], rows["pin_y"], rows["pin_z"])

    tee = plotter.plot(tee, hole_info, 1024, 2256)
    pin = plotter.plot(pin, hole_info, 1024, 2256)

    query = """insert into tee_coordinate (hole, course_id, round_id, camera_mode, tee_px_x, tee_px_y) values (%s, '%s', %s, '%s', %s, %s) """ % (
        hole, course_id, round_id, 'hole', tee[0], tee[1])

    cursor.execute(query)
    conn.commit()
    query = """insert into pin_coordinate (hole, course_id, round_id, camera_mode, pin_px_x, pin_px_y) values (%s, '%s', %s, '%s', %s, %s) """ % (
        hole, course_id, round_id, 'hole', pin[0], pin[1])

    cursor.execute(query)
    conn.commit()
def temporal_summation(num_pre = 3):
    data = []

    neuron_factory = NeuronFactory()
    post_neuron_name = "Postsynaptic"
    post_neuron = neuron_factory.create_neuron(probe_name=post_neuron_name)

    pre_neuron_names = []
    for i in xrange(num_pre):
        pre_neuron_name = "Presynaptic %d" % i
        pre_neuron_names.append(pre_neuron_name)

        pre_neuron = neuron_factory.create_neuron(
            neuron_type=NeuronTypes.GANGLION,
            probe_name = pre_neuron_name)

        synapse = neuron_factory.create_synapse(pre_neuron, post_neuron,
            dendrite_strength=0.0015, axon_delay=500)
        synapse.set_enzyme_concentration(0.5)

        neuron_factory.register_driver(pre_neuron,
            ActivationPulseDriver(activation=0.5, period=5000*(i+1),
                length=1, delay=1000 + (100*i), record=True))

    neuron_factory.step(args.iterations)

    print("Saved %d out of %d cycles." % (neuron_factory.stable_count, neuron_factory.time))

    for pre_neuron_name in pre_neuron_names:
        data.append(neuron_factory.get_probe_data(pre_neuron_name))
    data.append(neuron_factory.get_probe_data(post_neuron_name))
    if not args.silent:
        plot(data, title="Temporal summation test")
    def compute_visibility(self):
        #       coord=SkyCoord(ra.flatten(),dec.flatten(),unit=u.deg)
        #       m=Visibility().for_time("2016-10-09T17:31:06",coord=coord)

        #      figure(figsize=(20,10))
        #      scatter(ra,dec,s=100,c=m,lw=0,alpha=0.4)

        visibility = Visibility()

        gwm = healpy.read_map(rootd + "/" + self.skymap)
        nsides = healpy.npix2nside(gwm.shape[0])
        vmap = visibility.for_time(self.utc, nsides=nsides)

        healpy.mollview(
            gwm * (vmap * 2 - 1),
            title="visibility for INTEGRAL due to sun constraints\n" + self.utc)

        healpy.projscatter(self.ra, self.dec, lonlat=True)
        healpy.graticule()
        plot.plot(self.dir + "/visibility.png")

        healpy.write_map(self.dir + "/visibility.fits", vmap)

        source_theta = (90 - self.dec) / 180 * pi
        source_phi = (self.ra) / 180 * pi

        visibility = dict(probability_visible=sum(gwm * vmap),
                          source_visible=vmap[healpy.ang2pix(
                              nsides, source_theta, source_phi)])

        json.dump(visibility, open(self.dir + "/visibility.json", "w"))
Example #13
def test1():


    data = [] 
    sample = []
    num = 25 #number of elements
    # Feature set containing (x,y) values of 25 known/training data
    #Create arrays of 25x1 of the range (0, 99)
    x = np.random.randint(0,100,(num,1)).astype(np.float32)
    y = np.random.randint(0,100,(num,1)).astype(np.float32)
    
    for i in range(0,num): 
        sample.append([x[i][0], y[i][0]])

    
    arraynp = np.asarray(sample)

    # Labels each one either Red or Blue with numbers 0 and 1
    # Create a 25x1 array of labels in the range [0, 2)
    responses = np.random.randint(0,2,(num,1)).astype(np.float32)

    """Example for doing something awesome
    plt.scatter(blue[:,0],blue[:,1],80,'b','s')
    """

    
    #print sample
    #print arraynp
    #print len(arraynp)

    data.append(arraynp[responses.ravel() ==1 ])
    data.append(arraynp[responses.ravel() ==0 ])
    
    p.plot(data)
Example #14
def main():
    cidades = []
    pontos = []

    with open('./data/chn31.tsp') as f:  # read the cities file
        for linha in f.readlines():
            cidade = linha.split(' ')
            cidades.append(
                dict(index=int(cidade[0]), x=int(cidade[1]), y=int(cidade[2])))
            pontos.append((int(cidade[1]), int(cidade[2])))

    matriz_adjacencia = []
    rank = len(cidades)
    for i in range(rank):  # build the adjacency (distance) matrix
        linha = []
        for j in range(rank):
            linha.append(calc_distancia(cidades[i], cidades[j]))
        matriz_adjacencia.append(linha)

    aco = ACO(cont_formiga=100, geracoes=10, alfa=1.0, beta=10.0, ro=0.5, Q=10)
    grafo = Grafo(matriz_adjacencia, rank)
    try:
        caminho, custo = aco.resolve(grafo)
        print('total cost: {}, path: {}'.format(custo, caminho))
        if args.plot:
            plot(pontos, caminho)
    except TypeError:
        pass
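
calc_distancia is not included in this snippet; a plausible helper, assuming it returns the plain Euclidean distance between two of the city dicts read above:

import math

def calc_distancia(cidade_a, cidade_b):
    # assumed helper: straight-line distance between two city dicts with 'x'/'y' keys
    return math.sqrt((cidade_a['x'] - cidade_b['x']) ** 2 +
                     (cidade_a['y'] - cidade_b['y']) ** 2)
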
    def plot(self, subname, per_request=False):
        plot.clear_plot()
        if per_request:
            plot.plot([
                self._result['hour_statistic'][subname][x] /
                self._result['hour_statistic']['requests'][x]
                for x in sorted(self._result['hour_statistic'][subname].keys())
            ],
                      #fill=True
                      )
        else:
            plot.plot([
                self._result['hour_statistic'][subname][x]
                for x in sorted(self._result['hour_statistic'][subname].keys())
            ],
                      #fill=True
                      )

        plot.width(int(tsz[0] * SIZE_GRAPH))
        plot.height(int(tsz[1] * SIZE_GRAPH))

        plot.nocolor()
        plot.xlabel('hours')
        plot.axes(True, False)

        plot.show(tsz)
Example #16
def main():
    cities = []
    cost_matrix = []

    with open(settings.CITIES_DISTANCE) as f:
        data = json.load(f)

    for k, v in data.items():
        x, y = v['point']
        cities.append((y, x, k))
        cost_matrix.append([city['distance'] for city in v['cities']])

    aco = ACO(
        ant_count=20,
        run_without_improvement=20,
        alpha=2,
        beta=5,
        rho=0.5,
        q=5,
        pheromone_strategy='ant_density')

    graph = Graph(cost_matrix)
    path, cost = aco.solve(graph)
    print('cost: {}, path: {}'.format(cost, path))
    plot(cities, path)
Example #17
def main():
    accounts = {}

    accounts.update(
        yahoo_finance.load("csv_files/yahoo_finance/xrp_usd.csv", "XRP/USD", 0,
                           3))
    accounts.update(
        yahoo_finance.load("csv_files/yahoo_finance/btc_usd.csv", "BTC/USD", 0,
                           20000))
    accounts.update(
        yahoo_finance.load("csv_files/yahoo_finance/eth_usd.csv", "ETH/USD", 0,
                           1400))
    accounts.update(
        yahoo_finance.load("csv_files/yahoo_finance/cad_usd.csv", "CAD/USD",
                           .5, 1))
    accounts.update(
        yahoo_finance.load("csv_files/yahoo_finance/gspc.csv", "S&P 500", 1500,
                           3500))

    accounts.update(
        big_query.load("csv_files/big_query/xrp_activity.csv",
                       "/r/XRP Activity", 0, 400))

    # adds a line for total value of all accounts.
    #create_total(accounts)

    plot(accounts)
Example #18
def initial_conditions(delta_x):
    name = "initial_conditions"
    r = 100.
    vel = 16.
    big_mass = 1000.
    planets = []
    planets.append(Planet([0., 0.], [0., 0.], big_mass, stationary=True))
    planets.append(Planet([1.2 * r, 0.], [0., 0.], big_mass, stationary=True))
    planets.append(
        Planet([.6 * r, .6 * r], [0., 0.], big_mass, stationary=True))
    probe = Planet([0.8 * r, 0., 0.4 * r], [-0.3 * vel, -0.2 * vel, 0.2 * vel],
                   0.)
    probe2 = copy.deepcopy(probe)
    probe2.pos[0] *= (1 + delta_x)
    probe3 = copy.deepcopy(probe)
    probe3.pos[0] *= (1 + delta_x**2)
    planets.append(probe)
    planets.append(probe2)
    planets.append(probe3)
    remove_momentum(planets)
    centralize(planets)
    dump_planets(planets, 1000., active_planets=3, one_over_r=False)
    data = run_simulation(debug=False)
    #np.savetxt("butterfly.txt", data, fmt='%.4f')
    plot.plot(data, 1.5 * r, interval=1, disabled_trajectories=(-1, -2))
Example #19
def main():
    '''
    The parameters are read from the config file given on the command line
    usage: >> miniTopSim.py <ConfigFile>
    '''
    if(len(sys.argv) == 2 ):
        configFileName = str(sys.argv[1])
    else: 
        sys.stderr.write('Error: usage: '+ sys.argv[0] + ' <filename.cfg>')
        sys.stderr.flush()
        exit(2)
    
    # Read the parameter file
    par.init()
    par.read(configFileName)
    
    # create the xvals and yvals lists
    xvals,yvals = init_values()
   
    with open(par.INITIAL_SURFACE_FILE,"w") as file:
        # values at time t=0
        write(file, 0 , xvals,yvals)
        # Start time measurement
        startTime = time.clock()
        SurfaceProcess(xvals,yvals,file)
        # Stop time measurement and print it
        endTime = time.clock()
        print("Calculation Time: " + str(endTime - startTime) + " seconds")
        
    plot.plot(par.INITIAL_SURFACE_FILE)
Example #20
def generate_plot(dfs, initial_values, metric, race, district, subtract_1E5,
                  out_folder):
    fig = plt.figure()
    values = dfs[metric][race][district]
    if subtract_1E5:
        values = values - dfs[metric][race][district + '_1E5'].values
    initial_value = initial_values[metric][race][district]
    width = values.index[1] - values.index[0]

    # inefficient to recompute this within the loop, but whatever.
    x_bounds = [
        get_x_bounds(values.index.values, values[district].values),
        (99999999, initial_values[metric][race][district])
    ]
    mins, maxes = zip(*x_bounds)
    x_bounds = min(mins), 1.1 * max(maxes)

    line_color = 'red' if district.endswith('Rep') else 'purple'

    plot(values.index.values, values[district].values, x_bounds, width,
         initial_value, line_color, fig)

    sub15_string = '_sub1E5' if subtract_1E5 else ''
    fn = '_'.join((metric, race, district)) + sub15_string + '.png'
    plt.title(' - '.join((TITLES[district], race)))
    plt.savefig(os.path.join(out_folder, fn))
    plt.close()
Example #21
def main():
    N = 100
    iterations = 1000
    stateLookup = defaultdict(lambda: defaultdict(set))
    nestLookup = {i: set() for i in range(M)}
    ants = [Ant('exploration', 'at-nest', 0, 0, i, 0, None) for i in range(N)]
    print(ants)
    for ant in ants:
        stateLookup[ant.state][ant.substate].add(ant.i)
        nestLookup[ant.current].add(ant.i)
    history = []
    taskHistory = []
    for _ in range(iterations):
        ants = list(
            map(lambda a: update(a, ants, stateLookup, nestLookup), ants))
        history.append({k: len(v) for k, v in nestLookup.items()})
        taskDict = {
            k: sum(map(len, v.values()))
            for k, v in stateLookup.items()
        }
        taskDict.update({k: 0 for k in states if k not in taskDict})
        pprint(taskDict)
        taskHistory.append(taskDict)
    pprint(history)
    pprint(taskHistory)
    plot(history)
    plot(taskHistory)
def transmit(strength=0.25, delays=[None, 100]):
    data = []
    neuron_factory = NeuronFactory()
    dendrite_strength = 1
    if args.silent:
        pre_neuron = neuron_factory.create_neuron()
        for delay in delays:
            post_neuron = neuron_factory.create_neuron()
            synapse = neuron_factory.create_synapse(pre_neuron, post_neuron,
                delay=delay, strength=dendrite_strength)
    else:
        pre_neuron = neuron_factory.create_neuron(record=True)

        post_neurons = []
        for delay in delays:
            post_neuron = neuron_factory.create_neuron(record=True)
            post_neurons.append(post_neuron)
            synapse = neuron_factory.create_synapse(pre_neuron, post_neuron,
                delay=delay, strength=dendrite_strength)

    neuron_factory.register_driver(pre_neuron,
        PulseDriver(current=strength, period=100, length=1, delay=25))
    neuron_factory.step(args.iterations)

    #print("Saved %d out of %d cycles." % (neuron_factory.stable_count, neuron_factory.time))

    if not args.silent:
        data.append(("Pre neuron", pre_neuron.get_record()))
        for post_neuron in post_neurons:
            data.append(("Post neuron", post_neuron.get_record()))
        plot(data, title="Synaptic transmission")
Example #23
def main():

	epochs=int(sys.argv[1])
	print(epochs,' epochs')

	X_train, y_train, X_val, y_val, X_test, y_test = load_data()

	for i in range(0,1):
		im.display_image(X_train[i,:],image_size)

	g, X_train, y_train, X_val, y_val, X_test, y_test = preprocess_data(X_train, y_train, X_val, y_val, X_test, y_test)
	print('X_train.shape ',X_train.shape,'y_train.shape ',y_train.shape)
	print('X_val.shape ',  X_val.shape,  'y_val.shape ',  y_val.shape)
	print('X_test.shape ', X_test.shape, 'y_test.shape ', y_test.shape)

	# learn the model
	model=make_model()
	hist=fit(model , g, X_train, y_train, X_val, y_val, epochs)
	print(hist.history)

	# test the model
	pred = predict(model,X_test,y_test)
	accuracy=compute_accuracy(pred,y_test)
	print('accuracy on test data: ',accuracy*100, '%')
	show_results(pred,X_test,y_test)

	# save learned weights
	f="%d-%m-%y"
	filename='record/weights-'+dt.date.today().strftime(f)
	model.save_weights(filename,overwrite=True)

	pl.plot(hist.history,len(hist.history['acc']))
	os.system('./plot.sh')
Example #24
    def SGD(self, epochs, lr):
        """Train the neural network using mini-batch stochastic
        gradient descent.  The ``training_data`` is a list of tuples
        ``(x, y)`` representing the training inputs and the desired
        outputs.  The other non-optional parameters are
        self-explanatory.  If ``test_data`` is provided then the
        network will be evaluated against the test data after each
        epoch, and partial progress printed out.  This is useful for
        tracking progress, but slows things down substantially."""
        n_test = len(self.test_data)
        # n = len(self.training_data)
        for j in range(epochs):
            self.reset_grad()
            for row in self.training_data:
                self.backprop(row)
            self.update_mini_batch(lr)
            # print("Epoch {0}: {1}".format(j, self.evaluate(self.test_data) * 100 / n_test))
        print("Epoch {0}: {1}".format(
            j,
            self.evaluate(self.test_data) * 100 / n_test))
        result = []
        for data in self.test_data:
            x = 0
            if self.feedforward(data) > 0.5:
                x = 1
            result.append([data[0], data[1], x])

        x0, y0, x1, y1 = categorize_result(result)
        plot(x0, y0, x1, y1)
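
categorize_result is not defined here; given that each result row is [x, y, label] with label 0 or 1, a minimal sketch of such a helper might be:

def categorize_result(result):
    # hypothetical helper: split (x, y, label) rows into two point sets for plotting
    x0 = [row[0] for row in result if row[2] == 0]
    y0 = [row[1] for row in result if row[2] == 0]
    x1 = [row[0] for row in result if row[2] == 1]
    y1 = [row[1] for row in result if row[2] == 1]
    return x0, y0, x1, y1
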
Example #25
def main():
    sc = [0.05, 0.05, 10.0]

    ns, ms = [], []
    md1s, md2s = [], []
    ints = []

    end = '0'
    while end == '0':
        ns.append(int(input("Input N: ")))
        ms.append(int(input("Input M: ")))

        p = float(input("Enter parameter (tao): "))

        md1s.append(
            int(input("Outer integration mode (0 - Gauss, 1 - Simpson): ")))

        md2s.append(
            int(input("Inner integration mode (0 - Gauss, 1 - Simpson): ")))

        lm = [[0, pi / 2], [0, pi / 2]]

        ints.append(
            inter.Integrator(lm, [ns[-1], ms[-1]], [md1s[-1], md2s[-1]]))

        print("Result with {:.2f} as "
              "a parameter is {:.7f}".format(p, ints[-1](p)))

        end = input("Stop execution?: ")

    plot.plot(ints, sc, ns, ms, md1s, md2s)
def kp(x, y, k, stop=100000, log=False, logFile=None, step=50, verbose=False):
    n=len(x)
    w=np.zeros(n)
    b=y[0]
    w[0] = y[0]  # initialize w
    ind=0
    if ((not logFile) or (step <= 0)):
        log=False

    valeurs = [y[i]*(w[0]*k(x[i], x[0]) + b) for i in xrange(n)]
    negatifs = [i for i in xrange(n) if valeurs[i] < 0]
    while(negatifs and (stop > 0)):
        if (log and not (ind % step)):
            sFile=logFile+str(int(ind/step))+'.png'
            sTitle='Kernel perceptron at '+str(ind)+'-th iteration'
            plot.plot(x, y, np.dot(w, x), b, title=sTitle, saveFile=sFile, display=False)
        if (verbose and not (ind % step)):
            print 'iteration: ', ind
        j=negatifs[0]
        w[j] = w[j] + y[j]
        b = b + y[j]
        deltaValeurs = [y[i]*(y[j]*k(x[i], x[j]) + y[j]) for i in xrange(n)]
        valeurs=np.add(valeurs, deltaValeurs)
        negatifs = [i for i in xrange(n) if valeurs[i] < 0]
        stop -= 1
        ind+=1
    if (stop==0):
        print 'Kernel perceptron didn\'t converge !'
    return w, b, ind
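
A usage sketch for kp with a plain dot-product kernel and made-up toy data (the kernel, data, and arguments here are illustrative assumptions, not part of the original):

import numpy as np

def linear_kernel(a, b):
    # assumed kernel: plain inner product
    return float(np.dot(a, b))

# toy data: two roughly separable clusters labelled +1 / -1
x = np.vstack([np.random.randn(20, 2) + 2.0, np.random.randn(20, 2) - 2.0])
y = np.array([1.0] * 20 + [-1.0] * 20)
w, b, iterations = kp(x, y, linear_kernel, stop=10000)
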
 def flatFieldScan(self, count=None):
     try:
         self._busy=True
         if count is not None:
             self.setRequiredPixelCountForFlatFieldCalibration(int(count))
         self.prepareFlatFieldCollection()
         print "starting 2 one minute scans to calculate how long to scan for a complete flat field calibration at energy %s" % str(self.energy.getPosition())
         #collect two 1min frames
         self.scanFlatField(2,self.quickscantime+10)
         averagecount=averageScanRawCount(2, self.detector)
         numberofscan=int(math.ceil(self.requiredpixelcount/averagecount*self.quickscantime/self.slowscantime))
 
         print "starting %d flat field calibration scans. Total time to complete is %f seconds." % (numberofscan, (self.slowscantime+30)*numberofscan)
         self.motor.setSpeed((float(self.getUpperGdaLimits()[0])-float(self.getLowerGdaLimits()[0]))/self.slowscantime)
         self.scanFlatField(numberofscan, self.slowscantime+30)
         
         print "Sum all scanned raw data into one flat field data file..."
         self.sum_flat_field_file = sumScanRawData(numberofscan)
         #plot and view flat field raw data in SWING GUI
         try:
             plot(RAW,self.sum_flat_field_file)
         except:
             print "Plot flat field data from .raw data file failed."
             print "Unexpected error:", sys.exc_info()[0], sys.exc_info()[1]  # @UndefinedVariable
                         
         print "Please check the flat field file for any dead pixels, etc. and check that all the bad channels are in the bad channel list at "+BAD_CHANNEL_LIST
         #apply this flat field correction to PSD in GDA permanently
         self.applyFlatFieldCalibration()
     except:
         print "Flat field Collection aborted."
         print "Unexpected error:", sys.exc_info()[0], sys.exc_info()[1]  # @UndefinedVariable
     finally:
         print "Flat Field Collection Completed."
         self.stop()
         self._busy=False
Example #28
def clustering(args):
    ''' run clustering on a single k'''
    print "Feature Analysis/Clustering Mode: single k"

    feature_holder = featurevector.feature_holder(
        filename=FEATURE_VECTOR_FILENAME)
    sones_holder = featurevector.feature_holder(filename=SONE_VECTOR_FILENAME)
    k = args.k

    print feature_holder
    mfccs = feature_holder.get_feature('mfcc')

    print sones_holder
    sones = sones_holder.get_feature('sones')

    centroids, distortion = Get_Best_Centroids(k, 1)
    print "Distortion for this run: %0.3f" % (distortion)

    classes, dist = kmeans.scipy_vq(mfccs, centroids)

    # Get the inter class dist matrix
    inter_class_dist_matrix = mir_utils.GetSquareDistanceMatrix(centroids)

    eventBeginnings = feature_holder.get_event_start_indecies()
    # write audio if given -w
    if args.plot_segments:
        PlotWaveformWClasses(k, feature_holder, classes)
    if args.write_audio_results:
        WriteAudioFromClasses(k, feature_holder, classes)

    plot.plot(mfccs, sones, eventBeginnings, centroids,
              inter_class_dist_matrix, classes)
Example #29
def main():
    data = read()
    red = list(filter(lambda x: x[4], data))
    blue = list(filter(lambda x: not x[4], data))
    plot.plot(list(map(lambda x: x[1], red)), list(map(lambda x: x[2], red)),
              list(map(lambda x: x[3], red)), list(map(lambda x: x[1], blue)),
              list(map(lambda x: x[2], blue)), list(map(lambda x: x[3], blue)))
 def scan(self):
       
     initial_setpoint = self._get_control_f()
     scan_min = max(self._min_control,initial_setpoint - self._scan_range/2.)
     scan_max = min(self._max_control,initial_setpoint + self._scan_range/2.)
     steps=int((scan_max - scan_min) / self._control_step_size)
     print initial_setpoint,scan_min,scan_max, steps
     udrange=np.append(np.linspace(initial_setpoint,scan_min,int(steps/2.)),
             np.linspace(scan_min, scan_max, steps))
     udrange=np.append(udrange,np.linspace(scan_max,initial_setpoint,int(steps/2.)))
     values=np.zeros(len(udrange))
     true_udrange=np.zeros(len(udrange))
     for i,sp in enumerate(udrange):
         #print 'sp',sp
         self._set_control_f(sp)
         qt.msleep(self._dwell_time)
         true_udrange[i]=self._get_control_f()
         values[i]=self.get_value()
         
     valid_i=np.where(values>self._min_value)
     
     if self.get_do_plot():
         p=plt.plot(name=self._plot_name)
         p.clear()
         plt.plot(true_udrange[valid_i],values[valid_i],'O',name=self._plot_name)
     
     return (true_udrange[valid_i],values[valid_i])
Example #31
def plotTriage(tests=["RAMINDEX", "KCOV"]):
    data = {}
    for test in tests:
        #__data, minimizeAttempts = loadDataCached('triage_%s.cache', test, __processTest);
        __data, minimizeAttempts = __processTest(test)
        print(len(__data), __data[-1] if len(__data) > 0 else -1)
        # Triaging
        data = {
            "Total":
            [(v["executeCount"], v["ts"], v["triagingTotal"]) for v in __data],
            "Wasted":
            [(v["executeCount"], v["ts"], v["triagingFail"]) for v in __data],
        }
        plot(data,
             0,
             2,
             xlabel="Programs executed",
             ylabel="Number",
             title="",
             outfile="triage_total_%s.png" % test)
        # Minimization total
        if "Default" in test:
            print(test)
            # MLMinimize(minimizeAttempts)
            # analyzeMinimize(test, minimizeAttempts)
            exit()
Example #32
    def state_callback(self, t, dt, linear_velocity, yaw_velocity):
        '''
        called when a new odometry measurement arrives approx. 200Hz

        :param t - simulation time
        :param dt - time difference since the last invocation
        :param linear_velocity - x and y velocity in local quadrotor coordinate frame (independent of roll and pitch)
        :param yaw_velocity - velocity around quadrotor z axis (independent of roll and pitch)

        :return tuple containing linear x and y velocity control commands in local quadrotor coordinate frame (independent of roll and pitch), and yaw velocity
        '''
        self.t = t
        self.x = self.predictState(dt, self.x, linear_velocity, yaw_velocity)

        F = self.calculatePredictStateJacobian(dt, self.x, linear_velocity, yaw_velocity)
        self.sigma = self.predictCovariance(self.sigma, F, self.Q)

        self.verify_wp()

        self.visualizeState()

        wp = self.next_wp()
        if wp is not None:
            controls = np.array([20.0, 0.0]).T, self.Kp_psi * (-self.x[2, 0])
        else:
            controls = np.zeros((2, 1)), 0.0
        #plot("x", self.x[0, 0])
        plot("vx", linear_velocity[0])

        #print controls[0], controls[1]
        return controls
Example #33
def run(num_cities):
    cities = []
    points = []
    with open('./data/chn31.txt') as f:
        counter = 0
        for line in f.readlines():
            if counter < num_cities:
                city = line.split(' ')
                cities.append(
                    dict(index=int(city[0]), x=int(city[1]), y=int(city[2])))
                points.append((int(city[1]), int(city[2])))
                counter += 1
    cost_matrix = []
    rank = len(cities)
    for i in range(rank):
        row = []
        for j in range(rank):
            row.append(distance(cities[i], cities[j]))
        cost_matrix.append(row)
    aco = ACO(10, 100, 1.0, 10.0, 0.5, 10, 2)
    graph = Graph(cost_matrix, rank)
    path, cost = aco.solve(graph)
    print('number cities: {},cost: {}, path: {}'.format(
        num_cities, cost, path))
    plot(points, path)
    return path, cost
Example #34
def main():
    print('Start!')
    cities = []
    points = []
    print('Reading graph')
    with open('input/att48.txt') as f:
        for line in f.readlines():
            city = line.split(' ')
            cities.append(City(int(city[1]), int(city[2])))
            points.append((int(city[1]), int(city[2])))
    cost_matrix = []
    rank = len(cities)
    for i in range(rank):
        row = []
        for j in range(rank):
            row.append(cities[i].distance(cities[j]))
        cost_matrix.append(row)

    aco = ACS_HMM(n=1000, m=10, alpha=0.1, beta=5, rho=0.1, phi=0.1, q_zero=0.9)
    graph = Graph(cost_matrix, rank)
    print('Solving TSP-ACS-HMM')
    start_time = time.time()
    path, cost = aco.solve(graph)
    print("--- %s seconds ---" % (time.time() - start_time))
    print('Final cost: {}'.format(cost))
    print('Path: {}'.format(path))
    plot(points, path)
def transmit(strengths=[-255, -100, -50, -10, 0]):
    data = []
    neuron_factory = NeuronFactory()
    dendrite_strength = 100
    if args.silent:
        photoreceptor = neuron_factory.create_neuron(neuron_type=NeuronTypes.PHOTORECEPTOR)
        horizontal = neuron_factory.create_neuron(neuron_type=NeuronTypes.HORIZONTAL)
        synapse = neuron_factory.create_synapse(photoreceptor, horizontal,
            dendrite_strength=dendrite_strength)
    else:
        photoreceptor = neuron_factory.create_neuron(neuron_type=NeuronTypes.PHOTORECEPTOR, record=True)
        horizontal = neuron_factory.create_neuron(neuron_type=NeuronTypes.HORIZONTAL, record=True)
        synapse = neuron_factory.create_synapse(photoreceptor, horizontal,
            dendrite_strength=dendrite_strength)
        synapse = neuron_factory.create_synapse(horizontal, photoreceptor,
            transporter=Transporters.GABA, receptor=Receptors.GABA,
            dendrite_strength=dendrite_strength)

    for strength in strengths:
        driver = ConstantDriver(current=strength, delay=100)
        neuron_factory.register_driver(photoreceptor, driver)
        neuron_factory.step(args.iterations)

    #print("Saved %d out of %d cycles." % (neuron_factory.stable_count, neuron_factory.time))

    if not args.silent:
        data.append(("Photoreceptor", photoreceptor.get_record()))
        data.append(("Horizontal Cell", horizontal.get_record()))
        plot(data, title="Horizontal Cell Test")
Example #36
    def onSyncDone(sync_result, sl, t0, plop_collector, theseus):
        # TODO should write this to a result file
        print "GOT SYNC RESULT: ", sync_result
        t1 = datetime.now()
        if sl:
            sl.stop()
        if plop_collector:
            from plop.collector import PlopFormatter
            formatter = PlopFormatter()
            plop_collector.stop()
            if not os.path.isdir('profiles'):
                os.mkdir('profiles')
            with open('profiles/plop-sync-%s' % GITVER, 'w') as f:
                f.write(formatter.format(plop_collector))
        if theseus:
            with open('callgrind.theseus', 'wb') as outfile:
                theseus.write_data(outfile)
            theseus.uninstall()

        delta = (t1 - t0).total_seconds()
        # TODO should write this to a result file
        print "[+] Sync took %s seconds." % delta
        reactor.stop()

        if args.do_plot:
            from plot import plot
            plot(args.logfile)
def performIRL(method, trajectoryPerClusterWeight):
    #theta = maxent.irl(feature_matrix, gw.n_actions, discount,
    #gw.transition_probability, trajectories, epochs, learning_rate,trajectoryPerClusterWeight)

    if (method == "linear"):
        print("linear method")
        theta = maxent.irl(feature_matrix, ow.n_actions, discount,
                           ow.transition_probability, trajectories, epochs,
                           learning_rate, trajectoryPerClusterWeight)
    elif (method == "deep"):
        print("deep method")
        l1 = l2 = 0
        theta = deep_maxent.irl(
            (feature_matrix.shape[1], ) + network_structure,
            feature_matrix,
            ow.n_actions,
            discount,
            ow.transition_probability,
            trajectories,
            epochs,
            learning_rate,
            trajectoryPerClusterWeight,
            l1=l1,
            l2=l2)

    recovered_reward = feature_matrix.dot(theta).reshape((n_states, ))

    scaler = StandardScaler()
    standardised_reward = scaler.fit_transform(recovered_reward.reshape(-1, 1))

    plot.plot(ground_r, standardised_reward, grid_size)

    return theta
Example #38
def main():
    methods = {
        'insert_sort': insert_sort,
        'bubble_sort': bubble_sort,
        'selection_sort': selection_sort,
        'quick_sort': quick_sort,
        'merge_sort': merge_sort,
        'heap_sort': heap_sort,
        'shell_sort': shell_sort,
        'counting_sort': counting_sort,
        'radix_sort': radix_sort,
        'bucket_sort': bucket_sort
    }
    method_time = {}
    for method_name in methods:
        elapsed_times = 0
        for i in range(100):
            np.random.seed(seed=i)
            datas = list(np.random.randint(0, 500, 1000))
            start = time.time()
            methods[method_name](datas)
            end = time.time()
            elapsed_times += end - start
        print("{0} mean_elapsed_time:{1}".format(method_name, elapsed_times /
                                                 100) + "[sec]")
        method_time[method_name[:-5]] = round(elapsed_times, 3)
    plot(method_time)
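
The sort functions themselves are imported from elsewhere; as a reference point, an insert_sort of the kind this benchmark presumably times could look like this (an assumption, not the benchmarked code):

def insert_sort(datas):
    # in-place insertion sort over a list of comparable items
    for i in range(1, len(datas)):
        key = datas[i]
        j = i - 1
        while j >= 0 and datas[j] > key:
            datas[j + 1] = datas[j]
            j -= 1
        datas[j + 1] = key
    return datas
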
Example #39
def main():
    env_name = file_name = "Environments/Banana_Linux/Banana.x86_64"
    train_mode = True  # Whether to run the environment in training or inference mode
    env = UnityEnvironment(file_name=env_name, no_graphics=False)
    # env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64")
    # Set the default brain to work with
    brain_name = env.brain_names[0]
    brain = env.brains[brain_name]
    env_info = env.reset(train_mode=True)[brain_name]
    # Action and Observation spaces
    nA = brain.vector_action_space_size
    nS = env_info.vector_observations.shape[1]
    print('Observation Space {}, Action Space {}'.format(nS, nA))
    seed = 7
    agent = Priority_DQN(nS, nA, seed, UPDATE_EVERY, BATCH_SIZE, BUFFER_SIZE,
                         MIN_BUFFER_SIZE, LR, GAMMA, TAU, CLIP_NORM, ALPHA)
    agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
    # scores = train(agent,env,brain_name)
    for i in range(1):
        state = env.reset()
        img = plt.imshow(env.render(mode='rgb_array'))
        for j in range(500):
            action = agent.act(state)
            img.set_data(env.render(mode='rgb_array'))
            plt.axis('off')
            display.display(plt.gcf())
            display.clear_output(wait=True)
            state, reward, done, _ = env.step(action)
            # save the image
            plt.savefig('test' + str(j) + '.png', bbox_inches='tight')
            if done:
                break
    # plot the scores
    plot(scores)
Example #40
def plotCorrDiff(array1, array2, title, filepath):
    """
    A function to produce plots of correlation differences.

    Parameters:
    -----------
    array1 : observational dataset.  Output of corr()
    array2 : modelled dataset. Output of corr()
    """
    corrDiff_array = corrDiff(array1, array2)
    if corrDiff_array is not None:
        Dict6 = mapCorr()
        myplot = plot(corrDiff_array,
                      Dict6,
                      labels=False,
                      grid=False,
                      oceans=False,
                      cbar=True)
        reload(maps_sub)
        from maps_sub import saveFig
        saveFig(myplot, title, filepath)
    else:
        array = np.zeros((27, 22))
        Dict6 = mapCorr()
        myplot = plot(array,
                      Dict6,
                      labels=False,
                      grid=False,
                      oceans=False,
                      cbar=True)
        reload(maps_sub)
        from maps_sub import saveFig
        saveFig(myplot, title, filepath)
    return
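
corrDiff is not part of this snippet; one plausible reading, assuming it returns the element-wise difference of the two correlation maps and None when the inputs are incompatible:

import numpy as np

def corrDiff(array1, array2):
    # hypothetical helper: element-wise difference of two correlation arrays,
    # or None if either is missing or the shapes disagree
    if array1 is None or array2 is None:
        return None
    array1, array2 = np.asarray(array1), np.asarray(array2)
    if array1.shape != array2.shape:
        return None
    return array1 - array2
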
Example #42
    def state_callback(self, t, dt, linear_velocity, yaw_velocity):
        '''
        called when a new odometry measurement arrives approx. 200Hz
    
        :param t - simulation time
        :param dt - time difference since the last invocation
        :param linear_velocity - x and y velocity in local quadrotor coordinate frame (independent of roll and pitch)
        :param yaw_velocity - velocity around quadrotor z axis (independent of roll and pitch)

        :return tuple containing linear x and y velocity control commands in local quadrotor coordinate frame (independent of roll and pitch), and yaw velocity
        '''
        self.x = self.predictState(dt, self.x, linear_velocity, yaw_velocity)

        F = self.calculatePredictStateJacobian(dt, self.x, linear_velocity,
                                               yaw_velocity)
        self.sigma = self.predictCovariance(self.sigma, F, self.Q)
        position = self.x[0:2]
        markers = self.get_markers()
        self.target_marker_index = self.update_target_marker_index(
            markers, self.target_marker_index, position)

        desired_velocity = self.compute_desired_velocity(
            markers, self.target_marker_index, position, linear_velocity)
        u = self.compute_control_command(
            t, dt, position, linear_velocity,
            np.array([markers[self.target_marker_index]]).T, desired_velocity)
        u_yaw = self.compute_yaw_control_command(t, dt, self.x[2],
                                                 yaw_velocity, 0, 0)
        plot("x command", u[0])
        self.visualizeState()
        return u, u_yaw
Example #43
def plotter(time, domain, data_path=None, ids=None):
    if data_path is None:
        data_path = base_data_path + '/' + time[:-3]
    if not os.path.exists(data_path):
        os.makedirs(data_path)

    plot_time = parse_time(time)
    #run_time = plot_time - timedelta(hours=1)

    _file = download_era5(plot_time, data_path, domain)
    _radar_file = download_radar(plot_time, data_path)

    # Determine what data needs to be plotted and passed
    #data = pygrib.open(_file)
    #lats, lons = data[1].latlons()

    data = xr.open_dataset(_file)
    lats = data.latitude.values
    lons = data.longitude.values
    lons, lats = np.meshgrid(lons, lats)

    if domain is not None:
        plot_bounds = domains[domain]
        plot(data, lons, lats, plot_bounds, domain, plot_time, _radar_file)

    if ids is not None:
        try:
            ids = ids.strip().split(',')
        except:
            raise ValueError(
                "Improperly formatted sounding locations: ORD,MDW,DPA")
        print(ids)
        get_soundings(data, ids, lats, lons, plot_time)

    return
Example #44
def main():
    cities = []
    points = []
    # Read the .txt file; each line contains, in order: the city index, its X coordinate, and its Y coordinate
    with open(
            '/home/juancm/Desktop/Octavo/Tesis/ProyectoGrado/Metaheuristicas/ant-colony-tsp/ant-colony-tsp/data/chn31.txt'
    ) as f:
        for line in f.readlines():
            city = line.split(' ')
            cities.append(
                dict(index=int(city[0]), x=int(city[1]), y=int(city[2])))
            points.append((int(city[1]), int(city[2])))

    # Build the cost matrix from the nodes and the coordinates that were read
    cost_matrix = []
    rank = len(cities)
    for i in range(rank):
        row = []
        for j in range(rank):
            row.append(distance(cities[i], cities[j]))
        cost_matrix.append(row)
    # Instantiate ACO with: the number of ants, the number of generations, alpha, beta, rho, Q, and the strategy for updating T(i,j)
    aco = ACO(10, 100, 1.0, 10.0, 0.5, 10, 2)
    graph = Graph(cost_matrix, rank)
    path, cost = aco.solve(graph)
    print('cost: {}, path: {}'.format(cost, path))
    plot(points, path)
Example #45
def plotCorpus(tests=["KCOV", "RAMINDEX"]):
    datas = {}
    tmax = 0
    cmax = 0
    for test in tests:
        data = {}
        #__data, t, c = __processTest(test);
        __data, t, c = loadDataCached("corpus_%s.cache", test, __processTest)
        tmax = max(tmax, t)
        cmax = max(cmax, c)
        if len(__data) < 1:
            continue
        data[test] = [(v[0], v[1], v[2]) for v in __data]
        data[test + " FP"] = [(v[0], v[1], v[3]) for v in __data]
        datas[test] = data
    for test in datas:
        datas[test][test].insert(0, (0, 0, 0))
        datas[test][test + " FP"].insert(0, (0, 0, 0))
        datas[test][test].append((cmax, tmax, datas[test][test][-1][2]))
        datas[test][test + " FP"].append(
            (cmax, tmax, datas[test][test + " FP"][-1][2]))
        plot(datas[test],
             1,
             2,
             xlabel="Time elapsed (s)",
             ylabel="# of corpus",
             title="",
             outfile="corpus_%s.png" % test,
             xmax=tmax)
def test_grid(image=simple_image):
    height = len(image)
    width = len(image[0])

    neuron_factory = NeuronFactory(num_threads=1)
    neuron_grid = neuron_factory.create_neuron_grid(width, height, base_current=0.0)

    neuron_data = []

    for i in xrange(height):
        for j in xrange(width):
            neuron_factory.register_driver(
                neuron_grid[i][j],
                ConstantDriver(activation=image[i][j]))

    for _ in xrange(args.iterations):
        neuron_data.append(neuron_grid[0][0].soma.get_scaled_voltage())
        neuron_factory.step()
    neuron_factory.close()

    activity = []
    for row in neuron_grid:
        activity += [neuron.soma.get_scaled_voltage() for neuron in row]
    print(activity)

    im = Image.new('L', (width, height))
    im.putdata(activity)
    im.save('test.png')

    if not args.silent:
        plot([("neuron", neuron_data)], title="Photoreceptor test")
Example #47
def main():
    cities = []
    points = []
    with open('./data/ulysses16.tsp') as f:
        for line in f.readlines():
            #print(line)
            city = line.split(' ')
            #print(city)
            cities.append(dict(index=float(city[0]), x=float(city[1]), y=float(city[2])))
            #print(cities)
            points.append((float(city[1]), float(city[2])))
    cost_matrix = []
    rank = len(cities)
    #print(rank)
    for i in range(rank):
        row = []
        for j in range(rank):
            row.append(distance(cities[i], cities[j]))
        cost_matrix.append(row)
    #print('cost_matrix',cost_matrix)
    aco = ACO(100,1, 1.0, 10.0, 0.5, 10, 2)
    graph = Graph(cost_matrix, rank)
    path, cost = aco.solve(graph)
    cost=cost+cost_matrix[path[-2]][path[-1]]
    print('cost: {}, path: {}'.format(cost, path))
    plot(points, path)
    def train(self):
        """
        We store the stacked observations for critic training,
        but keep the states and next states separate for actor actions.
        """
        tic = time.time()
        means = []
        stds = []
        steps = 0
        scores_window = deque(maxlen=100)
        for e in range(1, self.episodes):

            self.noise.step()
            episode_scores = []
            obs = self.env.reset()
            for t in range(self.tmax):
                actions = self.act(obs)
                next_obs, rewards, dones = self.env.step(actions)

                # Store experience
                if np.max(rewards) > 0:
                    print('hit the ball over the net', rewards)
                self.R.add(obs.reshape(1, 48), obs, actions, rewards,
                           next_obs.reshape(1, 48), next_obs, dones)
                obs = next_obs
                # Score tracking
                episode_scores.append(np.max(rewards))

            # Learn
            if len(self.R) > self.min_buffer_size:
                for _ in range(self.SGD_epoch):
                    # Update each agent
                    for i in range(self.num_agents):
                        self.learn(i)
                    # update target networks
                    self.update_targets_all()

            steps += int(t)
            means.append(np.mean(episode_scores))
            stds.append(np.std(episode_scores))
            scores_window.append(np.sum(episode_scores))
            if e % 4 == 0:
                toc = time.time()
                r_mean = np.mean(scores_window)
                r_max = max(scores_window)
                r_min = min(scores_window)
                r_std = np.std(scores_window)
                plot(self.name, means, stds)
                print(
                    "\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}"
                    .format(e, self.episodes, steps, r_mean, r_min, r_max,
                            r_std, (toc - tic) / 60))
            if np.mean(scores_window) > self.winning_condition:
                print('Env solved!')
                # save scores
                pickle.dump([means, stds],
                            open(str(self.name) + '_scores.p', 'wb'))
                # save policy
                self.save_weights(self.critic_path, self.actor_path)
                break
Example #49
def run():
    def noise(sigma):
        import numpy
        return numpy.random.normal(0, sigma, 1).item(0) if sigma > 0 else 0;
    
    import math
    from plot import plot
    u = 0
    m = 2.0 # mass
    x = 0
    vx = 0
    ax = 0
    t = 0
    t_max = 30
    dt = 0.01
    steps = int(t_max / dt) + 1;
    setpoint = 5
    
    measurement_noise = 0.03 if enable_noise else 0
    measurement_delay = 20 if enable_delay else 1
    control_noise = 0.0
    control_delay = measurement_delay
    
    u_limit = 100
    x_measurements = [x + noise(measurement_noise) for i in range(measurement_delay)]
    vx_measurements = [vx + noise(measurement_noise) for i in range(measurement_delay)]
    
    control = [0] * control_delay;
    user = UserCode()
    
    stable_t = t_max;
    overshoot = 0
    
    for i in range(steps):
        current = x_measurements.pop(0)
        vel = vx_measurements.pop(0)
        
        control.append(user.compute_control_command(t, dt, current, setpoint) + noise(control_noise))
        
        diff = setpoint - current;
        diff_norm = math.sqrt(diff * diff);
        if(diff_norm > 3.5 * measurement_noise + 0.01):
            stable_t = t;
        overshoot = max(overshoot, max(current - setpoint, 0));
        
        u = max(min(control.pop(0), u_limit), -u_limit) # input = force
        
        t += dt;
        x += dt * vx
        vx += dt * ax
        
        ax = u / m
        
        x_measurements.append(x + noise(measurement_noise))
        vx_measurements.append(vx + noise(measurement_noise))
        plot("x_measured", x)
        plot("x_desired", setpoint)
    print "stable after:", stable_t, "overshoot:", overshoot
    return (stable_t, overshoot)
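
UserCode is supplied elsewhere; a minimal sketch of a compute_control_command this harness could drive, here a simple PD controller with made-up gains:

class UserCode(object):
    # hypothetical controller for the simulation loop above; the gains are assumptions
    def __init__(self):
        self.Kp = 2.0
        self.Kd = 4.0
        self.last_error = 0.0

    def compute_control_command(self, t, dt, measured_x, desired_x):
        error = desired_x - measured_x
        derivative = (error - self.last_error) / dt
        self.last_error = error
        return self.Kp * error + self.Kd * derivative
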
 def measurement_callback(self, t, dt, navdata):
     '''
     :param t: time since simulation start
     :param dt: time since last call to measurement_callback
     :param navdata: measurements of the quadrotor
     '''
     # add your plot commands here
     plot("roll", navdata.rotX);
    def optimize_matisse(self,lt1_optimize, lt2_optimize):
        print 'Optimizing Matisse'
        #scan the matise in the preset range around the current setpoint, find optima for lt1 and lt2 cr counts
        optimum_lt1=-1.0
        optimum_lt2=-1.0
        
        initial_setpoint = self._ins_pidmatisse.get_setpoint()
        
        self._get_input_values()
        qt.msleep(self._par_mat_wait_time)
        dt, cr_checks_lt1, cr_cts_lt1, cr_failed_lt1, cr_checks_lt2, cr_cts_lt2, cr_failed_lt2, tpqi_starts, tail_cts = self._get_input_values()
        
        initial_cr_cts_per_check_lt1=-1.0
        if cr_checks_lt1>0: initial_cr_cts_per_check_lt1=cr_cts_lt1/cr_checks_lt1
        initial_cr_cts_per_check_lt2=-1.0
        if cr_checks_lt2>0: initial_cr_cts_per_check_lt2=cr_cts_lt2/cr_checks_lt2
        
        lt1_cts_per_check=[]
        lt2_cts_per_check=[]
        
        freq_scan_min = max(self._par_min_setpoint_mat,initial_setpoint - self._par_mat_scan_range/2.)
        freq_scan_max = min(self._par_max_setpoint_mat, initial_setpoint + self._par_mat_scan_range/2.)
        self._ins_pidmatisse.set_setpoint(freq_scan_min)
        qt.msleep(6.*self._par_mat_wait_time)
        self._get_input_values()
        for freq in linspace(freq_scan_min, freq_scan_max, int((freq_scan_max - freq_scan_min) / self._par_mat_stepsize)):
            self._ins_pidmatisse.set_setpoint(freq)
            qt.msleep(self._par_mat_wait_time*.5)
            cur_freq=freq
            if self._func_matisse_freq is not None: cur_freq = (self._func_matisse_freq() - self._ins_pidmatisse.get_value_offset())* self._ins_pidmatisse.get_value_factor()
            qt.msleep(self._par_mat_wait_time*.5)
            dt, cr_checks_lt1, cr_cts_lt1, cr_failed_lt1, cr_checks_lt2, cr_cts_lt2, cr_failed_lt2, tpqi_starts, tail_cts = self._get_input_values()
            if cr_checks_lt1>0: lt1_cts_per_check.append((cr_cts_lt1/cr_checks_lt1,cur_freq))
            if cr_checks_lt2>0: lt2_cts_per_check.append((cr_cts_lt2/cr_checks_lt2,cur_freq))
            
        print 'Initial cr_cts_per_check: lt1:',initial_cr_cts_per_check_lt1, 'lt2:', initial_cr_cts_per_check_lt2
        #print 'Scan: lt1:', lt1_cts_per_check ,'lt2:', lt2_cts_per_check
        self._ins_pidmatisse.set_setpoint(initial_setpoint)
        
        if ((lt1_optimize) and (len(lt1_cts_per_check)>0) and (max(lt1_cts_per_check)[0] >=  initial_cr_cts_per_check_lt1)):
            #if self._par_fit:
            #    print 'Fitting not yet implemented'
            #    pass
            #    #a, xc, k = copysign(max(y), V_max + V_min), copysign(.5, V_max + V_min), copysign(5., V_max + V_min)
            #    #fitres = fit.fit1d(x,y, common.fit_AOM_powerdependence, a, xc, k, do_print=True, do_plot=False, ret=True)
            optimum_lt1=max(lt1_cts_per_check)[1]
            print 'Setting new setpoint matisse:', optimum_lt1
            self._ins_pidmatisse.set_setpoint(optimum_lt1)
        
        if ((lt2_optimize) and (len(lt2_cts_per_check)>0) and (max(lt2_cts_per_check)[0] >=  initial_cr_cts_per_check_lt2)):
            optimum_lt2=max(lt2_cts_per_check)[1]
        
        self._plt.clear()
        self._plt=plt.plot(lt2_cts_per_check)
        self._plt=plt.plot(lt1_cts_per_check)
        qt.msleep(4.*self._par_mat_wait_time)

        return optimum_lt1, optimum_lt2
def plots(startNumber, endNumber, commonSuffix="mythen_summed.dat", dataType=PSD):
    firstPlot=True
    datadir=PathConstructor.createFromDefaultProperty()
    for filenumber in range(startNumber, endNumber+1):
        filename=str(filenumber)+"-"+commonSuffix
        if firstPlot:
            plot(dataType, os.path.join(datadir,filename))
            firstPlot=False
        else:
            plotover(dataType, os.path.join(datadir,filename))
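A typical call, assuming scan files 100-mythen_summed.dat through 105-mythen_summed.dat exist in the default data directory (the file numbers here are hypothetical), would overlay them in one figure:

# hypothetical usage: plot file 100, then overlay files 101-105
plots(100, 105)
# a different suffix or data type can also be passed explicitly
plots(100, 105, commonSuffix="mythen_summed.dat", dataType=PSD)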
Example #53
def test():
    import numpy as np
    import plot as p
    reload(p)
    data = np.random.rand(100, 200)
    p.plot(data, 'test')

    reload(p)
    pts = np.random.rand(10000, 3)
    p.plot3d(pts)
Example #54
def test2():

    #All the sample
    data = load_data_set("../flowers.csv")
    
    #The training set is 99 % of the sample
    training_set = get_training_set(0.99, data)

    #The test set
    test_set = get_test_set(training_set, data) 


    #These are the plant families (classes) in the data set
    kind_of_plants = data["Family"].unique()
    
    """This prints the data per family
    for k in kind_of_plants:
        print (k)
        print (data[data["Family"] == k])
    """


    k = 3
    #These columns are the features used to compute the Euclidean distance.
    #columns = ["a", "b", "c", "d"]
    columns = ["a", "b"]
    predictions = knn(training_set, test_set, k, columns)

    get_locations_neighbors(predictions, columns)
    
        

    
    """ Print  the data classified by the selected columns
    for k in kind_of_plants:
        print (k)
        sol = data[data["Family"] == k]
        print sol[columns]
    """

    #Plotting is only possible in 2D in this project
    if (len(columns) == 2):
        data_plot = []

        #Add the training data to the plot data
        for k in kind_of_plants:
            sol = training_set[training_set["Family"] == k]
            data_plot.append(sol[columns].values)

        #Add the test data to the plot data
        for k in kind_of_plants:
            sol = test_set[test_set["Family"] == k]
            data_plot.append(sol[columns].values)

        p.plot(data_plot)
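The knn helper itself is not part of this example; a rough sketch with the same call signature, knn(training_set, test_set, k, columns), assuming pandas DataFrames with a "Family" label column and numeric feature columns, might look like:

import numpy as np

def knn(training_set, test_set, k, columns):
    """Illustrative k-NN sketch: majority vote over the k closest training rows."""
    train_pts = training_set[columns].values.astype(float)
    train_labels = training_set["Family"].values
    predictions = []
    for _, row in test_set.iterrows():
        # Euclidean distance from this test row to every training row
        dists = np.sqrt(((train_pts - row[columns].values.astype(float)) ** 2).sum(axis=1))
        nearest = train_labels[np.argsort(dists)[:k]]
        labels, counts = np.unique(nearest, return_counts=True)
        predictions.append(labels[np.argmax(counts)])
    return predictions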
Example #55
    def output_plots(self, plotdir):
        benchmap = self.benchmap
        benchnames = sorted(benchmap.values())
        benchinv = {}
        for k, v in benchmap.items():
            benchinv[v] = k

        for o in self.out_plots:
            basename = o[0]
            cfglist = o[1]

            clist = []
            for cfgspec in cfglist:
                r = re.compile('^' + cfgspec.replace('*', '.*') + '$')
                for c in self.configlist:
                    if r.match(c): clist.append(c)

            metric = o[2]

            popt = {'AVG': False, 'GEOMEAN': False}
            for opt in o[3]:
                popt[opt] = True

            data = []

            for benchname in benchnames:
                bench = benchinv[benchname]
                if bench in self.badbenches: continue
                if bench not in self.vals: continue

                row = [bench]

                for c in clist:
                    keyname = c + '.' + metric
                    if keyname in self.vals[bench]:
                        val = self.vals[bench][keyname]
                    else:
                        val = None
                    if val is None: val = 0.0

                    row.append(val)

                data.append(row) 

            data = plot.add_avg(data, popt['AVG'], popt['GEOMEAN'])
            if len(o) > 4:
                opts = o[4]
            else:
                opts = {}

            plot.write_gnuplot_file(plotdir + '/' + basename, clist, metric, metric, opts)
            plot.write_data_file(plotdir + '/' + basename, data, clist)
            plot.plot(plotdir + '/' + basename)
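Each entry of self.out_plots consumed above is a tuple of (basename, config patterns with '*' wildcards, metric name, list of plot options, optional extra options dict); an illustrative entry, with made-up config and metric names, would be:

# hypothetical out_plots entry: plot the 'ipc' metric for all 'base*' and 'opt*'
# configs into <plotdir>/ipc_compare, appending a geometric-mean row
out_plots = [
    ('ipc_compare', ['base*', 'opt*'], 'ipc', ['GEOMEAN'], {}),
]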
Example #56
    def plotMetricsOnceOverPeriod(self, metrics, days, startdate=None):
        # evaluate the default at call time instead of once at function definition
        if startdate is None:
            startdate = datetime.datetime.today()
        dates, lines = self.evaluateMetricsOverPeriod(metrics, startdate, days)
        linelist = [
            (lines['Net Worth'], 'g-'),
            (lines['Net Assets'], 'b-'),
            (lines['Net Liabilities'], 'r-'),
        ]

        plot.plot(dates, linelist)

        return lines['Net Worth'].pop()
Example #57
def main():
    '''
    Isotropic etching of a surface along the angle bisector (1 nm/s):
    two parameters are passed via the command line, the etching time t
    and the time step dt.

    usage: >> miniTopSim.py t dt

    In addition, the coordinates are written to a file in the format:

    surface: time, npoints, x-positions y-positions
    x[0] y[0]
    x[1] y[1]
    ...
    x[npoints-1] y[npoints-1]

    '''
    
    # If the number of command-line arguments is wrong, an error message
    # showing the correct usage is printed
    if(len(sys.argv) == 2 ):
        configFileName = str(sys.argv[1])
    else: 
        sys.stderr.write('Error: usage: '+ sys.argv[0] + ' <filename.cfg>')
        sys.stderr.flush()
        exit(2)
    
    # Read the parameter file
    par.init()
    par.read(configFileName)
    
    # Create the lists xvals and yvals
    xvals,yvals = init_values(par.XMIN, par.XMAX, par.DELTA_X)
        
    # Plot the surface at time t=0
    # plotten(xvals,yvals,'bo-','Anfangszeitpunkt')
    
    # A file named 'basic_t_dt.srf' is created, where t and dt are replaced by the
    # actual total time and time step. The surface at t=0 is also written to the
    # file (xvals and yvals in columns)
    file = open('basic_{0}_{1}.srf'.format(par.TOTAL_TIME, par.TIME_STEP),"w")
    write(file, 0 , xvals,yvals)
    
    aetzen(file, par.TOTAL_TIME, par.TIME_STEP, xvals, yvals)
    
    file.close()
    
    # Plot the surface at the final time
    # plotten(xvals,yvals,'ro-','Endzeitpunkt')
    
    fname = 'basic_{0}_{1}.srf'.format(par.TOTAL_TIME, par.TIME_STEP)
    plot.plot(fname)
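The write helper used above is not shown; a minimal sketch that produces the .srf layout described in the docstring (a header line followed by one "x y" pair per line) could be:

def write(file, time, xvals, yvals):
    # hypothetical implementation of the surface writer described in the docstring
    file.write('surface: {0}, {1}, x-positions y-positions\n'.format(time, len(xvals)))
    for x, y in zip(xvals, yvals):
        file.write('{0} {1}\n'.format(x, y))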
def external_current(rs=[-5, 2, 3, 5]):
    data = []
    for r in rs:
        neuron_factory = NeuronFactory()
        neuron = neuron_factory.create_neuron(record=True)

        neuron_factory.register_driver(neuron,
            PulseDriver(current=r, period=1, length=1, delay=args.iterations/10))
        neuron_factory.step(args.iterations)

        data.append(("Current %f" % r, neuron.get_record()))
    if not args.silent:
        plot(data, title="External current")
Example #59
def testClassification(sampleSize,x,y,w):
    goodC = 0
    badC = 0
    for i in xrange(2*sampleSize):
#        print classify(x[i],w),y[i]
#        print "valeur: ", printClassify(x[i],w)
        if classify(x[i],w)==y[i]:
            goodC +=1
        else:
            badC += 1
#    print "Good : ", goodC
#    print "Bad : ", badC
    print "rate : ", goodC/sampleSize/2
    plot.plot(x, y, w[:-1], w[-1])
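classify is assumed here to be a linear decision rule over the weight vector w, with the last entry acting as the bias (matching the w[:-1], w[-1] split passed to plot.plot above); a hedged sketch, with a made-up {+1, -1} label convention:

import numpy as np

def classify(xi, w):
    # hypothetical linear classifier: sign of w[:-1].xi + bias w[-1]
    return 1 if np.dot(w[:-1], xi) + w[-1] >= 0 else -1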
def external_current(rs=[-2, -1, 0, 1]):
    data = []
    for r in rs:
        neuron_factory = NeuronFactory()
        neuron_name = "current: %f" % r
        neuron = neuron_factory.create_neuron(probe_name=neuron_name)

        neuron_factory.register_driver(neuron,
            CurrentPulseDriver(current=r, period=1, length=1, delay=args.iterations/10))
        neuron_factory.step(args.iterations)

        data.append(neuron_factory.get_probe_data(neuron_name))
    if not args.silent:
        plot(data, title="External current")