Example #1
 def apply(self, *args, **kwargs):
     ''' Apply the registered tasks.
         Optional keyword arguments:
             show_plot - show the plot via matplotlib
             tasks - build the plot only for the given list of tasks
     '''
     if not self._tasks:
         raise NoTasksException('No tasks defined')
     print('Loaded tasks: ')
     default_tasks = []
     for t in self._tasks:
         print(t.title())
         default_tasks.append(t.title())
     m = Metrics([t.run(self._session) for t in self._tasks])
     self._metrics = m
     tasks = kwargs.get('tasks', default_tasks)
     for task in tasks:
         data = m[task]
         self._metrics_store[task] = MetricsStore(
             task,
             self._metric_names,
             self._method_names,
             m,
             task)
         if kwargs.get('show_plot'):
             show(data['planning_time'], data['execution_time'])
Example #2
def seccion4():
    f1 = lambda x: x*0
    g1 = lambda x: math.sin(math.pi*(x+1))
    l1 = lambda t: t*0
    r1 = lambda t: t*0
    c1=lambda x: 2
    t_max=1
    alpha1=1
    alpha0=0
    beta1=1
    beta0=0
    edptype=1
    soltype=1
    h=0.004
    k=0.001
    P1 = {"xmin":-1, "xmax":1, "tmin":0, "tmax":t_max  , "c":c1,"f":f1,"g":g1, "l":l1, "r":r1}
    x1,t1,u1=diferenciacion(P1,h,k,edptype,soltype,alpha1,beta1)  # Dirichlet
    x2,t2,u2=diferenciacion(P1,h,k,edptype,soltype,alpha0,beta0)  # Neumann
    x3,t3,u3=diferenciacion(P1,h,k,edptype,soltype,alpha1,beta0)  # mixed 1
    x4,t4,u4=diferenciacion(P1,h,k,edptype,soltype,alpha0,beta1)  # mixed 2
    x5,t5,u5=diferenciacion(P1,h,k,edptype,soltype,0.5,0.5)  # Robin
    plot.show(x1,t1,u1)
    plot.show(x2,t2,u2)
    plot.show(x3,t3,u3)
    plot.show(x4,t4,u4)
    plot.show(x5,t5,u5)
Example #3
def simulate_skin(steps=5, max_iter=100, learning_rate=0.1):
    """Simulate learning skin data set."""
    data = read_data('Skin_NonSkin.txt')
    train_data, test_data = split_list(data, 0.75)

    start = len(train_data) // steps  # First step training set size.
    end = len(train_data)  # Final step training set size.

    sizes = []  # Training data set sizes.
    success = []  # Success rates according to training data set sizes.
    for i in range(steps):
        # Increase training data size according to iteration.
        size = start + i * end // steps
        current_train_data = train_data[:size]

        w = train(current_train_data, max_iter=max_iter, r=learning_rate)
        error = test(test_data, w)

        status(current_train_data, test_data, error)
        print()

        # Record size-success statistics.
        sizes.append(size)
        success.append(100 - error)

    plot_success_per_size(sizes, success)
    show()
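Both this example and Example #14 below call a split_list(data, ratio) helper that is not shown here. A plausible minimal sketch of such a helper (an assumption, not the original implementation) could be:

def split_list(items, ratio):
    # Split a sequence into two parts; the first part receives `ratio` of the items.
    cut = int(len(items) * ratio)
    return items[:cut], items[cut:]

With this sketch, split_list(data, 0.75) returns a 75%/25% train/test split.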
Example #4
    def plot(self, subname, per_request=False):
        plot.clear_plot()
        if per_request:
            plot.plot([
                self._result['hour_statistic'][subname][x] /
                self._result['hour_statistic']['requests'][x]
                for x in sorted(self._result['hour_statistic'][subname].keys())
            ],
                      #fill=True
                      )
        else:
            plot.plot([
                self._result['hour_statistic'][subname][x]
                for x in sorted(self._result['hour_statistic'][subname].keys())
            ],
                      #fill=True
                      )

        plot.width(int(tsz[0] * SIZE_GRAPH))
        plot.height(int(tsz[1] * SIZE_GRAPH))

        plot.nocolor()
        plot.xlabel('hours')
        plot.axes(True, False)

        plot.show(tsz)
Example #5
def plot(entCodes,
         bPivotPoints=True,
         bVolumes=True,
         bRSI=True,
         bLinRegress=False):
    """
    Plot a predefined set of data wrt each entCode in the given list.
    """
    datas = [['srel', 'srel.MetaLabel'], ['roll3Y', 'roll3Y.MetaLabel'],
             ['retOn', 'rosaf.MetaLabel']]
    entDB = edb.gEntDB
    if type(entCodes) == str:
        entCodes = [entCodes]
    for entCode in entCodes:
        entIndex = entDB.meta['codeD'][entCode]
        entName = entDB.meta['name'][entIndex]
        print("\n\nEntity: {:20} {}".format(entCode, entName))
        ops.print_pivotpoints('pp', entCode, "PivotPntsD")
        ops.print_pivotpoints('ppW', entCode, "PivotPntsW", False)
        ops.print_pivotpoints('ppM', entCode, "PivotPntsM", False)
        for d in datas:
            print("{:10} {}".format(d[0], entDB.data[d[1]][entIndex]))
        _plot(entCode,
              bPivotPoints=bPivotPoints,
              bVolumes=bVolumes,
              bRSI=bRSI,
              bLinRegress=bLinRegress)
        eplot.show()
Example #6
def main(**kwargs):
    v = {-1, +1, +2}  # Set of votes.
    n = 5000      # Number of iterations.

    g = three_leaders(v)

    measures = {j: [] for j in v.union({0})}
    
    # Main cycle.
    for i in range(n):
        simulation.iteration(g, v)

        for j in v.union({0}):
            measures[j].append(formulae.occupation_measure(g, j))

        # Log.
        if 'log' in kwargs:
            kwargs['log'](i, g, v)

    # Plot.
    if kwargs.get('plot', False):
        colours = {-1: 'r', 0: 'g', +1: 'b', +2: 'y'}
        cesaro = {x: formulae.cesaro(y) for x, y in measures.items()}

        plot.lines(measures, colours)
        plot.lines(cesaro, colours)

        plot.show()
Example #7
def run():
  #goImpulse(1,1,fname=dirFig+'FirstDerivative.pdf') # creates the impulse response for different Fornberg's filters for first derivatives
  #goImpulse(2,2,fname=dirFig+'SecondDerivative.pdf') # creates the impulse response for different Fornberg's filters for second derivatives
#  goError(3)  # prints the tables for convergence analysis
#  goPluto(5)  # shows the Pluto related figures
  simple()
  plt.show() 
Example #8
def simulate_seperable(data_size):
    """Simulate learning a completely seperable data set."""
    data = generate_sphere_data(data_size, margin=0)
    train_data, test_data = split_list(data, 0.75)
    w = train(train_data, max_iter=500, r=0.01)
    error = test(test_data, w)
    status(train_data, test_data, error)

    plot_data(data)
    plot_w(data, w)
    show()
Example #9
def demo_plot_sin():
    t = np.arange(0.0, 2.0, 0.01)
    s = 1 + np.sin(2 * np.pi * t)
    fig, ax = get_axis_fig()
    plot(ax, t, s)
    set_labels(ax,
               x_label='time (s)',
               y_label='voltage (mV)',
               title='About as simple as it gets, folks')
    set_grid(ax)
    show()
Example #10
def test_adaptive_grid():
    '''Test adaptive grid'''
    dir_path = os.path.dirname(os.path.abspath(__file__))
    
    plot.init('Isotropic Etching with Adaptive Grid')
        
    for time, surface in main(os.path.join(dir_path, 'test_adaptive_grid.cfg')):
        if time == 0:
            plot.add_surface(surface, 'initial surface')

    plot.add_surface(surface, 'time = ' + str(time))
    plot.show()
Example #11
def test_plot_from_file_fill_between(mock_plt):
    my_plot.plt.figure()
    my_plot.plot_from_file(file_name="00_25",
                           param_name="return",
                           limit_x_range='steps_return')
    my_plot.show()

    # Assert plt.fill_between has been called once
    mock_plt.fill_between.assert_called_once()

    # Assert plt.figure got called
    assert mock_plt.figure.called
Example #12
def demo(name, mu, sigma, strategies):
    t0 = time()
    Q, R, A = multistrat(mu=mu,
                         sigma=sigma,
                         strategies=strategies,
                         epochs=1100)
    multiplot(A, R, strategies)
    plt.ylim(1)
    print name, "ran in", time() - t0, "s"
    show(name.replace(' ', '_'))

    plot_qs(Q, mu, strategies)
    show(name.replace(' ', '_') + "-q")
Example #13
def test_plot_from_file_xy_label(mock_plt):
    my_plot.plt.figure()
    my_plot.plot_from_file(file_name="00_25",
                           param_name="return",
                           limit_x_range='steps_return')
    my_plot.show()

    # Assert plt.xlabel and plt.ylabel have been called with the expected label args
    mock_plt.xlabel.assert_called_once_with("steps_return")
    mock_plt.ylabel.assert_called_once_with("return")

    # Assert plt.figure got called
    assert mock_plt.figure.called
Example #14
def simulate_increasing(data_size, margin=0.3, max_iter=100, learning_rate=0.1,
                        steps=5, start=None, end=None):
    """Simulate learning an increasing training data set.

    Generates a non-separable data set, and trains on an increasing training
    set, then tests and plots.

    start: Initial (first step) training data set size.
    end: Final (last step) training data set size.
    """
    data = generate_sphere_data(data_size, margin=margin)
    train_data, test_data = split_list(data, 0.75)

    # Initialize start/end sizes if not given.
    start = len(train_data) // steps if start is None else start
    end = len(train_data) if end is None else end

    w_colors = ['b', 'c', 'm', 'y', 'k']  # w vector (line) graph color.
    w_gs = []  # w plot graphs.
    sizes = []  # Training data set sizes.
    success = []  # Success rates according to training data set sizes.
    for i in range(steps):
        # Increase training data size according to iteration.
        size = start + i * end // steps
        current_train_data = train_data[:size]

        w = train(current_train_data, max_iter=max_iter, r=learning_rate)
        error = test(test_data, w)

        status(current_train_data, test_data, error)
        print()

        # Record size-success statistics.
        sizes.append(size)
        success.append(100 - error)

        # Plot decision boundary.
        w_color = w_colors[i] if i < len(w_colors) else w_colors[-1]
        figure(0)
        g, = plot_w(current_train_data, w, color=w_color)
        w_gs.append(g)

    figure(0).suptitle('Test data size: %d\nMaximum iterations: %d' % (len(test_data), max_iter))
    plot_w_legend(w_gs, sizes)
    plot_data(data)

    figure(1).suptitle('Success rate according to training set size.')
    plot_success_per_size(sizes, success)

    show()
Example #15
def test_plot_from_file_ranges(mock_plt):
    my_plot.plt.figure()
    range_y = [-1, 1]
    my_plot.plot_from_file(file_name="00_25",
                           param_name="return",
                           limit_x_range='steps_return',
                           limit_x=1000,
                           range_y=range_y)
    my_plot.show()

    # Assert plt.ylim has been called once with the expected range
    mock_plt.ylim.assert_called_once_with(range_y)

    # Assert plt.figure got called
    assert mock_plt.figure.called
Example #16
def seccion1():
    f1 = lambda x: np.exp(-200*x**2)
    g1 = lambda x: 0  # 400*x*np.exp(-200*x**2)
    l1 = lambda t: t*0
    r1 = lambda t: t*0
    c1=lambda x: 1
    t_max=100
    alpha=1
    beta=1
    edptype=1
    soltype=1
    h=0.002
    k=0.001
    P1 = {"xmin":-1, "xmax":1, "tmin":0, "tmax":t_max  , "c":c1,"f":f1,"g":g1, "l":l1, "r":r1}
    x,t,u=diferenciacion(P1,h,k,edptype,soltype,alpha,beta)
    plot.show(x,t,u)
Example #17
def seccion5():
    f1 = lambda x: np.exp(-200*x**2)+((x+1)*x)/2.0
    g1 = lambda x: (1.0/2.0)+x-400*np.exp(-200*x**2)*x
    l1 = lambda t: math.sin(t)
    r1 = lambda t: ((math.sin(t))*(math.cos(t)))/t
    c1=lambda x: (1.0/5.0)+((math.sin(x-1))**2)
    t_max=20
    alpha=1
    beta=1
    edptype=1
    soltype=1
    k=0.001
    h=0.004
    P1 = {"xmin":-1, "xmax":1, "tmin":0, "tmax":t_max  , "c":c1,"f":f1,"g":g1, "l":l1, "r":r1}
    x,t,u=diferenciacion(P1,h,k,edptype,soltype,alpha,beta)
    plot.show(x,t,u)
Example #18
def test_4_plot_files_colors(mock_plt):
    my_plot.plt.figure()
    my_plot.plot_files(['00_25', '25_50', '50_75', '75_100'],
                       param_name='return',
                       last_N=100,
                       limit_x=None,
                       limit_x_range='steps_return',
                       range_y=None,
                       y_ticks=None,
                       legend=True)

    my_plot.show()

    assert mock_plt.plot.call_count == 4

    # Assert plt.figure got called
    assert mock_plt.figure.called
Example #19
def test_plot_from_file_yticks(mock_plt):
    my_plot.plt.figure()
    range_y = [-1, 1]
    y_ticks = 0.01
    my_plot.plot_from_file(file_name="00_25",
                           param_name="return",
                           limit_x_range='steps_return',
                           limit_x=1000,
                           range_y=range_y,
                           y_ticks=y_ticks)
    my_plot.show()

    # Assert plt.yticks has been called once
    mock_plt.yticks.assert_called_once()

    # Assert plt.figure got called
    assert mock_plt.figure.called
Example #20
def test_4_plot_files_legend(mock_plt):
    my_plot.plt.figure()
    my_plot.plot_files(['00_25', '25_50', '50_75', '75_100'],
                       param_name='return',
                       last_N=100,
                       limit_x=None,
                       limit_x_range='steps_return',
                       range_y=None,
                       y_ticks=None,
                       legend=True)

    my_plot.show()

    # Assert plt.legend has been called once
    mock_plt.legend.assert_called_once()

    # Assert plt.figure got called
    assert mock_plt.figure.called
Example #21
def seccion3():
    f1 = lambda x: np.exp(-200*x**2)
    g1 = lambda x: 400*x*np.exp(-200*x**2)
    l1 = lambda t: t*0
    r1 = lambda t: t*0
    c1=lambda x: 1
    t_max=1
    alpha=1
    beta=1
    edptype=1
    soltype=1
    h1=0.004
    k1=0.0009
    h2=0.004
    k2=0.001
    h3=0.004
    k3=0.01

    P1 = {"xmin":-1, "xmax":1, "tmin":0, "tmax":t_max  , "c":c1,"f":f1,"g":g1, "l":l1, "r":r1}
    x1,t1,u1=diferenciacion(P1,h1,k1,edptype,soltype,alpha,beta)
    x2,t2,u2=diferenciacion(P1,h2,k2,edptype,soltype,alpha,beta)
    x3,t3,u3=diferenciacion(P1,h3,k3,edptype,soltype,alpha,beta)
    plot.show(x1,t1,u1)  # works
    plot.show(x2,t2,u2)  # works
    plot.show(x3,t3,u3)  # dies (unstable)
Example #22
File: cli.py Project: djplt/mustat
def main(name, songs, graph):
    '''
    Mustat - Music Statistics.
    CLI program to generate statistics about music artists.
    '''
    try:
        art = Artist(name, songs)
        click.echo("Artist data downloaded!\n")
        click.echo("**** Summary Artist Data ****")
        click.echo("Artist name: %s" % art.name)
        click.echo("Attempted song downloads %d" % songs)
        click.echo("Successful song downloads %d" % len(art.songs))
        click.echo("Average word length = %.2f" % art.averageWordLength)
        click.echo("Standard deviation of word length = %.2f" %
                   art.stdevWordLength)
        click.echo("Variance of word length = %.2f" % art.varianceWordLength)
        click.echo("Most common word: '%s'" % art.mostCommonword)

        if graph:
            histogram(art.wordLengths,
                      x_label="Word Lengths",
                      y_label="Frequency",
                      title="{} Word Lengths".format(art.name))

            wordBarPlot(art.words,
                        x_label="Words",
                        y_label="Frequency",
                        title="{}'s Top 10 Words".format(art.name))
            show()
    except APINearlyFound as e:
        click.echo("Did you mean %s?" % e.args)
    except APINotFound:
        click.echo(
            "Could not find any artists or songs under the artist name '%s'" %
            name)
    except APIFormatError:
        click.echo("Unexpected API error")
    except Exception as e:
        click.echo("**** An unexpected error occurred ****")
        raise e
Example #23
def main():
    w, tw, rate, size, max = wav.readWav("af2.wav")
    Ew = np.sum(np.square(w)) / rate
    Pw = np.sum(np.square(w)) / size
    # Get 10 random intervals and measure the power of the signal
    for i in range(10):
        interval = [random.randint(0, size)]
        interval.append(random.randint(0, size))
        interval.sort()
        Pwi = np.sum(np.square(
            w[interval[0]:interval[1] + 1])) / (interval[1] - interval[0])
        print("Power for " + str(round(interval[0] / float(rate), 5)) +
              "s - " + str(round(interval[1] / float(rate), 5)) + "s = " +
              str(round(Pwi, 5)) + "W")
    # Plot the audio signal
    plt.plot(
        w, tw, rate, 10, "w(t)", "Signal w(t)", "Energy: " +
        str(round(Ew, 3)) + " J, Power: " + str(round(Pw, 3)) + " W")
    # Save the plot into an image file
    if not os.path.exists("img"):
        os.mkdir("img", 0755)
    plt.savefig("img/3-AudioSignal.png")
    plt.show()
Example #24
File: core.py Project: alxlhr/FLUOR
    def run(self):
        starttime = timeit.default_timer()

        print(self.state.exp)
        if self.state.exp == "R":
            print("### Calculating Rays ###")
        elif self.state.exp == "TL":
            print("### Calculating TL ###")
        elif self.state.exp == "A":
            print("### Calculating Arrivals ###")
        else:
            raise NameError('Wrong exp name')

        def_arr = np.ones_like(self.state.z[0, :], dtype=bool)
        for i in range(self.state.n_max - 1):
            if (i % 10 == 0):
                print("%i / %i" % (i, self.state.n_max), end='\r')
            loop.ray_step(i, def_arr, self.state.ds0, self.state)
            caustics.step(i, self.state)
            boundary.apply(i, self.state)
            gc.collect()

        loss.calc_normals(self.state)
        if self.state.exp == "TL":
            loss.calc_TL(self.state)
        if self.state.exp == "A":
            arrival.calc_arr(self.state)

        print("Elapsed time : %i sec" % (timeit.default_timer() - starttime))

        if self.state.save == 1:
            IO.save(self.state, self.params)

        if self.state.plot == 1:
            plot.show(self.state)

        gc.collect()
Example #25
def seccion2():
    f1 = lambda x: np.exp(-200*x**2)
    g1 = lambda x: 400*x*np.exp(-200*x**2)
    l1 = lambda t: t*0
    r1 = lambda t: t*0
    c1=lambda x: 1
    t_max=1
    alpha=1
    beta=1
    edptype=1
    soltype=1
    h1=0.02
    k1=0.001
    h2=0.004
    k2=0.001
    h3=0.002
    k3=0.001

    P1 = {"xmin":-1, "xmax":1, "tmin":0, "tmax":t_max  , "c":c1,"f":f1,"g":g1, "l":l1, "r":r1}
    tiempo1 = time.perf_counter()
    x1,t1,u1=diferenciacion(P1,h1,k1,edptype,soltype,alpha,beta)
    tiempo2 = time.perf_counter()
    tiempo100=tiempo2-tiempo1

    tiempo3 = time.perf_counter()
    x2,t2,u2=diferenciacion(P1,h2,k2,edptype,soltype,alpha,beta)
    tiempo4 = time.perf_counter()
    tiempo500=tiempo4-tiempo3

    tiempo5 = time.perf_counter()
    x3,t3,u3=diferenciacion(P1,h3,k3,edptype,soltype,alpha,beta)
    tiempo6 = time.perf_counter()
    tiempo1000=tiempo6-tiempo5

    print("the time for 100 is:",tiempo100)
    print("the time for 500 is:",tiempo500)
    print("the time for 1000 is:",tiempo1000)
    plot.show(x1,t1,u1)
    plot.show(x2,t2,u2)
    plot.show(x3,t3,u3)
Example #26
f = d.filter()
print(f)
inp2 = np.convolve(inp,f,'same')
rsf = rsf1darray(0.0,0.1,101)
rsf.rsf = inp2
rsfft = rsffft1(rsf)
fft = rsfft.fft()
fft.rsf = np.abs(fft.rsf)
plt.plot1d(fft,fn=1,fname=None)

inp *= 0.

inp[50] = 1.
inp2 = DerivBackward(DerivForward(inp))
rsf = rsf1darray(0.0,0.1,101)
rsf.rsf = inp2
rsfft = rsffft1(rsf)
fft = rsfft.fft()
fft.rsf = np.abs(fft.rsf)
plt.plot1d(fft,fn=1,fname=None)

plt.show()
Example #27
def iterate(M, T, env, prb, path):
    #DQN
    inp = Input(shape=(2*K+2,), dtype=float32)
    #emb = Embedding(input_dim = 2 , output_dim = 64)(inp)
    r = Reshape((1, -1))(inp)#, input_shape=(12,))
    lstm = LSTM(100 , return_sequences=False, activation='relu')(r)
    
    #1
    value = Dense(10 , activation='relu')(lstm)
    v = Dense(20 , activation='relu')(value)
    OutputLayer = Dense(K+1)(v)
    """a = Dense(10 , activation='relu')(lstm)
    #a = Dense(16 , activation='relu')(a)
    ad = []

    for i in range(K + 1):
        ad.append(Dense(1)(a))
        
        
    averaged = average(ad)
    
    advantage = concatenate(inputs = ad , axis=-1)
    
    subtracted = subtract([advantage , averaged])
    OutputLayer = add([value , subtracted])"""
    
    #2
    inp2 = Input(shape=(2*K+2,), dtype=float32)
    #emb = Embedding(input_dim = 2 , output_dim = 64)(inp)
    r2 = Reshape((1, -1))(inp2)#, input_shape=(12,))
    lstm2 = LSTM(100 , return_sequences=False, activation='relu')(r2)
    
    #1
    value2 = Dense(10 , activation='relu')(lstm2)
    v2 = Dense(20 , activation='relu')(value2)
    OutputLayer2 = Dense(K+1)(v2)
    """a2 = Dense(10 , activation='relu')(lstm2)
    #a2 = Dense(16 , activation='relu')(a2)
    ad2 = []

    for i in range(K + 1):
        ad2.append(Dense(1)(a2))
        
        
    averaged2 = average(ad2)
    
    advantage2 = concatenate(inputs = ad2 , axis=-1)
    
    subtracted2 = subtract([advantage2 , averaged2])
    OutputLayer2 = add([value2 , subtracted2])"""
    
    
    DQN1 = Model(inputs = inp , outputs = OutputLayer)
    DQN2 = Model(inputs = inp2 , outputs = OutputLayer2)
    
    #DQN1.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
    #DQN2.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
    
    DQN1.compile(loss='mse', optimizer='adam')
    DQN2.compile(loss='mse', optimizer='adam')
    
    """
    print('Initialization')
    esw = array([1 , 0 , 0 , 0 , 0 , 0])[newaxis]
    dhg = array([0])[newaxis]
    dhgf = array([0,0,0])[newaxis]
    DQN1.fit(esw , dhgf , epochs=8, batch_size=4 , verbose = 0)
    print(DQN1.predict(esw))
    """
    print('START')
    
    N = env.N
    output = env.scheme
    excc = ""
    MA = 0
    maxx = 0
    throughput = 0
    prob = .95
    
    ### Loading DQN's
    #DQN1 = load_model('DQN1.h5')
    
    DQN2.set_weights(DQN1.get_weights())
    replayBufferx = ndarray(shape = (100, M*T*N, 2*K + 2))
    replayBufferQ = ndarray(shape = (100, M*T*N, K + 1))
    
    for it in range(R):
        print('throughput for iteration %i :' % (it))
        tq = ndarray(shape = (N , K + 1))
        OptA = ndarray(shape = (N , 2*K + 2))
        Q = ndarray(shape = (M*T*N , K + 1))
        x = ndarray(shape = (M*T*N , 2*K + 2) , dtype = int)
        act = []
        time = -1
        dd =[]

        for yy in range(15):
            output += '|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\r\n'
        output += 'at iteration ' + str(it) + '\r\n'
        
        for episode in range(M):
            output += '\r\nEpisode : ' + str(episode) + '\r\n'
            throughput = 0
            data = []
            state = env.reset()
            
            for t in range(T):
                tprime = t
                output += '\r\n  TIME : ' + str(t) + '\r\n'
                data.append(t)
                data[t] = []
                a = np.zeros(N, dtype=int)

                for n in range(N):
                    time += 1
                    
                    """
                    if it < 256:
                        xx = copy(env.observe(n))
                        x[time] = copy(xx)
                        tq[n] = DQN1.predict((xx)[newaxis])
                        rand = np.random.randint(N)
                        #print(rand)
                        if rand%N <= env.K:
                            a = rand
                        else:
                            a = 0
                        """
                        
                    x[time] = copy(state[n])
                    x[time] = (x[time])[newaxis]
                    tq[n] = DQN1.predict((state[n])[newaxis])
                    outputtq = tq[n]
                    p , tq[n] = ActionDistribution(env.K , tq[n] , it, R, path)
                    
                    list_of_candidates = range(K + 1)
                    nn = sum(p)
                    probability_distribution = [x/nn for x in p]
                    number_of_items_to_pick = 1
                    
                    aMax = choice(list_of_candidates, number_of_items_to_pick,
                                  p=probability_distribution)
                    #aMax = argmax(p)
                    #aMax = argmax(tq[n])
                    
                    #prob = exp(-4.7 + 4.95*it/1000) # e^-4.7 =~ 0 e^-0.05 =~ 0.95
                    #prob = 1 - exp((0-it)/(1000/3)) # e^(-3) = .04
                    """"prob = 1 - .1*(.995**it)
                    prob = max(1 - 0.05, prob)
                    prob = prob - (prob%0.001)
                    """
                    #elif t < 5:
                    #    prob = 1 - .4
                    ### competitive: e^(-3)
                    ###              1 - .05
                    
                    ran = random.uniform(0.0 , 1.0)
                    if ran < prob:
                        a[n] = aMax # argmax(ActionDistribution(K , tq[n] , it))
                    else:
                        ran = random.randint(K) # random int between 0 and K-1
                        if ran < aMax:
                            a[n] = ran
                        else:
                            a[n] = ran + 1
                            
                    output += '    ' + str(state[n]) + ' --> Q --> ' + str(outputtq) + ' --> Dist --> ' + str(p) + '\r\n'
            
                ###=========================================================###
                ###                      in T
                ###=========================================================###
                
                output += '    Chosen action: ' + str(a) + '\r\n'
                
                done = False
                if t==T-1:
                    done = True
                _, action, state, reward = env.step(a, done)
                
                
                for i in range(N):
                    output += '    ' + str(state[i]) + '\r\n'
                output += '    reward: ' + str(reward) + '\r\n'
                
                for i in range(N):
                    s = (state[i])[newaxis]
                    Q1 = DQN1.predict(s)
                    Q2 = DQN2.predict(s)
                    tq[i][action[i]] = reward[i] + gamma*Q2[0][argmax(Q1[0])] #action[i]]
                    
                    Q[time - (N-1) + i] = tq[i]
                    if state[i][-1]==1:
                        throughput += (1)
                    data[t].append(action[i])
                    
                    act.append(action[i])
                    
                    output += '    Q2[' + str(action[i]) + ']: ' + str(Q2[0][action[i]])
                    output += ' --> Q[' + str(time - (N-1) + i) + ']: ' + str(Q[time - (N-1) + i]) + '\r\n'
                
            ###=========================================================###
            ###                        in M
            ###=========================================================###
            
            """if env.scheme == 'sum rate' or env.scheme == 'sum-log rate':
                for i in range(T*N):
                    Q[time - i] += reward[0]"""
                
            throughput /= (T*K)
            
            if throughput >= .5:
                excc = '!!!'
            else:
                excc = ''
            
            print("episode %i: " % (episode) , "% 12.2f" % (100*throughput) , excc)
        
            if throughput>maxx:
                DQN1.save(path + 'DQN1.h5')
                maxx = throughput
            
            #figure
            if it >= R-10 or it%100==0 or maxx == throughput or throughput >= 0.65:
                plt.show(data, throughput, it, episode, path)
                dd = data[-1]
            
            if dd != []:
                print(dd)
                dd = []
                
            #MA
            MA += throughput
                     
        ###===============================================================###
        ###                         in iteration
        ###===============================================================###
        
        """if it%100==0:
            f = open("Qs" + str(it) + ".txt" , "w+")
            f.write(output)
            f.close()
            output = ''
            
        if it == 3:
            f = open("Qs3.txt" , "w+")
            f.write(str(Q) + '\r\n')
            for b in range(len(Q)):
                f.write(str(x[b]) + str(Q[b]) + "\r\n")
            f.close()
        """
        
        #replayBufferx[it%100] = x
        #replayBufferQ[it%100] = Q
        
        f = open(path + "history" + " of " + env.scheme + ' ' + str(int(it/1000)) + ".txt" , "a")
        f.write(output)
        f.close()
        output = ''

        #fitting
        #if it >= 99:
            #x = replayBufferx[i]
            #Q = replayBufferQ[i]
        batchSize = M*T*N # len(x)
        DQN1.fit(x , Q , batch_size=batchSize , epochs=1 , verbose = 0) #batch_size=50(time) epochs=10000(iteration) policy=absolutely random
        if it % 5 == 0: # % 80
            DQN2.set_weights(DQN1.get_weights())

        MA /= M
        print("% 12.2f" % (100*MA))
        MA = 0
    
    #Saving
    DQN1.save(path + 'DQN1.h5')
    
    return env.AggregatedReward , throughput , Q[-1] , DQN1
Example #28
    for i, output_image in enumerate(output_images):
        f.write("Accuracy on test input " + str(i) +
                "(adjusted): {0: .1%}".format(acc[i]) + '\n')
        test_details_file = open(
            paths['Details Output Path'] + 'test_' + str(i) + '.txt', 'a')
        test_details_file.write("Accuracy on test input " + str(i) +
                                "(adjusted): {0: .1%}".format(acc[i]) + '\n')
        test_details_file.close()

if not simulation_parameters['Load/Retrain Model'][0]:
    for i in range(num_train):
        error_train = utils.normalised_rms_error(phase_exact_flat_train[i],
                                                 phase_retrieved_flat_train[i])
        f.write("Accuracy on training input " + str(i) +
                ": {0: .1%}".format(error_train) + '\n')
        train_details_file = open(
            paths['Details Output Path'] + 'train_' + str(i) + '.txt', 'a')
        train_details_file.write("Accuracy on training input " + str(i) +
                                 ": {0: .1%}".format(error_train) + '\n')
        train_details_file.close()

f.close()

# Save trained model
if not simulation_parameters['Load/Retrain Model'][0] or simulation_parameters[
        'Load/Retrain Model'][1]:
    saver.save(session, save_model_path + 'model')

#utils.beep()  # Alert user that script has finished
show()  # Prevent plt.show(block=False) from closing plot window
Example #29
def demo_plot_from_file():
    plot_from_file(file_name="00_25",
                   param_name="return",
                   limit_x_range='steps_return')
    show()
Example #31
def detect(img_path):
    model = predict.model()
    results = prepare_image(img_path=img_path, model=model)
    show(img_path=img_path, results=results)
Example #32

def get_country_ipinfo(ips: List[str]) -> dict:
    with open("ipinfo_auth", "r") as f:
        ips = map(lambda x: x + "/country", ips)
        auth_code = f.read().strip()
        handler: ipinfo.Handler = ipinfo.getHandler(auth_code)
        results: dict = handler.getBatchDetails(ips)
        print(".")
        return results


if __name__ == "__main__":
    last_result: Dict[str, int] = {}
    ip_list: List[str] = load_ips("ip.txt")

    for i, ip_chunk in enumerate(chunk_ip(ip_list)):
        results: dict = get_country_ipinfo(ip_chunk)
        for result in results:
            country = results[result]
            country_count = last_result.get(country, 0)
            last_result[country] = country_count + 1

    last_result = {
        key: value
        for key, value in reversed(
            sorted(last_result.items(), key=lambda item: item[1]))
    }
    write_result(str(last_result), "results/result.txt")
    plot.show(last_result, len(ip_list))
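The load_ips and chunk_ip helpers used above are not shown. A minimal sketch of what a chunking generator such as chunk_ip might look like (an assumption about the missing code, not the original implementation):

from typing import Iterator, List

def chunk_ip(ips: List[str], size: int = 100) -> Iterator[List[str]]:
    # Yield successive fixed-size slices of the IP list; the chunk size
    # here is an arbitrary illustrative value.
    for i in range(0, len(ips), size):
        yield ips[i:i + size]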
Example #33

if __name__ == '__main__':
    p, q = 42, 37
    # ascii = lambda txt: [x + ' , ' + str(ord(x)) for x in txt]
    string = "abcdefghe" * 100    #This string is for represent time comparision between sequential and parallel algorithm
    string = 'python'             #This string is for authentication
    char, outcome = [], []
    j, sq_time = 0, 0

    for i in string:
        char.append(ord(i[0]))

    # Sequential RSA implementation
    pubkey = key_gen(31, 17)
    for letter in char:
        time1 = sequential.encryp_text(letter, pubkey, 31, 17)
        privkey = receive_privkey()
        plaintext, time2 = sequential.decryp_text(privkey, 31, 17)
        promp = time1 + time2
        sq_time += promp

    # Parallel RSA implementation
    pr_time_start = time.time()
    parallel.prsa(char)
    pr_time_end = time.time()
    pr_time = pr_time_end - pr_time_start

    # Plot the relative times
    plot.show(sq_time, pr_time)
Example #34
    colors = "rgbky"

    for si, sigma in enumerate([SIGMA1, SIGMA2, SIGMA3]):
        t0 = time()
        Q, R, A = multistrat(mu=MU,
                             sigma=sigma,
                             strategies=strats,
                             epochs=5000)

        fig = plt.figure(figsize=(12, 5))

        ax = fig.add_subplot(spec[0])
        for i, s in enumerate(strats):
            plt.plot(R[i].mean(axis=0),
                     label=s.__name__,
                     alpha=0.5,
                     c=colors[i])
        plt.legend(fontsize=10, loc="lower right")
        plt.title("Average reward over {} runs".format(len(R[0])))

        fig.add_subplot(spec[1], sharey=ax)
        bp = plt.boxplot(R.mean(axis=2).T, labels=[s.__name__ for s in strats])
        for box, color in zip(bp['boxes'], colors):
            box.set_color(color)
        plt.xticks([])
        plt.ylim(3)

        name = "Ex_2_sigma{}".format(si + 1)
        print(name, time() - t0, "s")
        show(name)
Example #35
import plot as plt

# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10
samples_std1 = np.random.normal(20, 1, 100000)
samples_std3 = np.random.normal(20, 3, 100000)
samples_std10 = np.random.normal(20, 10, 100000)

# Make histograms
plt.hist(samples_std1, normed=True, histtype='step', bins=100)
plt.hist(samples_std3, normed=True, histtype='step', bins=100)
plt.hist(samples_std10, normed=True, histtype='step', bins=100)

# Make a legend, set limits and show plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))
plt.ylim(-0.01, 0.42)
plt.show()

# Generate CDFs
x_std1, y_std1 = ecdf(samples_std1)
x_std3, y_std3 = ecdf(samples_std3)
x_std10, y_std10 = ecdf(samples_std10)

# Plot CDFs
plt.plot(x_std1, y_std1, marker='.', linestyle='none')
plt.plot(x_std3, y_std3, marker='.', linestyle='none')
plt.plot(x_std10, y_std10, marker='.', linestyle='none')

# Make a legend and show the plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')
plt.show()
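The ecdf helper called above is not defined in the snippet. A common minimal implementation (an assumption about what the original looks like) is:

import numpy as np

def ecdf(data):
    # Empirical CDF: sorted sample values on x, cumulative fractions on y.
    x = np.sort(data)
    y = np.arange(1, len(x) + 1) / len(x)
    return x, y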
Example #36
# Solid objects
center = [L / 2, H / 2]
radius = min(L, H) / 6
circulation_wanted = 90
obs = tools.obstacle(center, radius, circulation_wanted)

tic = time.perf_counter()

# Creation of the World
w = tools.world(opt)
# Computation of the circulation
w.phi_solid = cfd.circulation_computation(obs, opt)
# Print input parameters
cli.input(w, opt)
# Creation of the fluid
f = tools.fluid(w, c, opt)
# Boundary conditions computation
w, f = cfd.boundary(w, f, obs)
# Computation of the fluid dynamic
f, res = cfd.gauss_seidel(w, f, opt)

toc = time.perf_counter() - tic
print("Elapsed time: %1.2fs" % toc)
print("Number of iterations done: %i" % res.iters)
print("Max error archieved: %1.5E" % res.error)

plot.velocity(w, f, obs)
plot.density(w, f, obs)
plot.phi(w, f, obs)
plot.show()
Example #37
        print("    {{%9.6ff, %9.6ff, %9.6ff}," % (m[0,0], m[0,1], m[0,2]))
        print("     {%9.6ff, %9.6ff, %9.6ff}," % (m[1,0], m[1,1], m[1,2]))
        print("     {%9.6ff, %9.6ff, %9.6ff}}," % (m[2,0], m[2,1], m[2,2]))
    print("};")
    print()
    print_code_gen_notice()
    print("const Matrix3f AP_GeodesicGrid::_mid_inverses[10]{")
    for i in range(10):
        a, b, c = ico.triangles[i]
        ma, mb, mc = .5 * (a + b), .5 * (b + c), .5 * (c + a)
        m = np.matrix((
            (ma.x, mb.x, mc.x),
            (ma.y, mb.y, mc.y),
            (ma.z, mb.z, mc.z),
        )).getI()
        print("    {{%9.6ff, %9.6ff, %9.6ff}," % (m[0,0], m[0,1], m[0,2]))
        print("     {%9.6ff, %9.6ff, %9.6ff}," % (m[1,0], m[1,1], m[1,2]))
        print("     {%9.6ff, %9.6ff, %9.6ff}}," % (m[2,0], m[2,1], m[2,2]))
    print("};")


if args.icosahedron:
    print('Icosahedron:')
    for i, t in enumerate(ico.triangles):
        print('    %s' % str(t))
    if args.plot:
        plot.polygons(ico.triangles)

if args.plot:
    plot.show(subtriangles=args.plot_subtriangles)
Example #38
    return p


#Body
### Loading DQN's
DQN1 = load_model('DQN1.h5')

env2 = Env(N, K, 'competitive')
path = 'results/competitive/11/'

while True:
    FigNum = 0
    #s = input("Exit?[y/n]: ")
    #if s=='y':
    #    break

    for it in range(30):
        thr, data = experience(env2, DQN1)
        FigNum += 1
        print('throughput at experience #', it + 1, ' : ',
              "% 12.2f" % (100 * thr))

        #figure
        plt.show(data, thr, it, path)
        dd = data[-1]

        if dd != []:
            print(dd)
    break
Example #39
# Threshold
ax1.axhline(0.0, color="k", ls="-", alpha=0.5)

# U(r)
ax1.plot(r, U, "--k", alpha=0.5, label="Unperturbed ion")

# Laser
ax1.plot(r, laser, "-r", label="Laser")

# U(r) + laser
ax1.plot(r, Ubent, "-k", label="Effective")

# Electron
ax1.plot([0.0], [-Xe_Z0_Ip], "ob", ms=14)
ar1 = fleches.arrow("Tunnel", [0.0, -Xe_Z0_Ip], [0.9 * r[-1], -Xe_Z0_Ip])
ar1.Plot(ax1, color="g")

ax1.set_xlim((r[0], r[-1]))
ax1.set_ylim((0.95 * Umin, Umax))

ax1.set_xlabel("r [bohr]")
ax1.set_ylabel("Energy [Hartree]")

leg = ax1.legend(loc="best")
leg.get_frame().set_alpha(0.75)

for ext in ["pdf", "svg"]:
    plot.savefig("ionization_tunnel." + ext)
plot.show()
Example #40
#coding=utf-8
import pandas as pd
import numpy as np
import plot as plts

path = "D:\\fallingspace\\perceptron\\data\\ex4Data\\"
dataX = pd.read_table(path + "ex4x.dat",
                      sep="   ",
                      header=None,
                      engine='python')
dataY = pd.read_table(path + "ex4Y.dat",
                      sep="   ",
                      header=None,
                      engine='python')

x = dataX[0]
y = dataX[1]
value = dataY[0]
plts.show(x, y, value)
# plts.show(x,y,value)
# plts.drawLine(dataX)
Example #41
# You should adjust these hyperparameters.
# NOTE: one iteration means the model runs forward-backwards on one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' controls how often (in iterations) information is displayed within one epoch.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0,
    'momentum': 0.7,
    'batch_size': 100,
    'max_epoch': 300,
    'disp_freq': 5,
    'test_epoch': 2
}
lo_list=[]
ac_list=[]
categories=[1,2,3,4]
for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    train_net(model, loss, config, train_data, train_label, config['batch_size'], config['disp_freq'])

    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        lo,ac=test_net(model, loss, test_data, test_label, config['batch_size'])
        lo_list.append(lo)
        ac_list.append(ac)


show4category(model, test_data, test_label, categories)
show(lo_list,ac_list,config['max_epoch'],config['test_epoch'])
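As a side note on the iteration/epoch terminology in the comments at the top of this example, a minimal arithmetic sketch (the training-set size below is an assumed, purely illustrative number):

import math

n_train_samples = 60000                    # assumed size, for illustration only
batch_size = 100                           # matches the config above
iters_per_epoch = math.ceil(n_train_samples / batch_size)
print(iters_per_epoch)                     # one epoch = 600 iterations (batches)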
Example #42
def draw_histos(vis=False, filepath='/output/evalSave/'):
    print(filepath)
    if not os.path.exists(filepath):
        print('path created')
        os.makedirs(filepath)
    
    with open(os.path.join(filepath, 'eval.json'), 'r') as j:
        data = json.load(j)
        
    # build precision-recall curves
    prec_03, rec_03 = eval_plot.sort_prec_rec(data['evaluation']['prec']['0.3'], data['evaluation']['rec']['0.3'])
    eval_plot.plot_prec_rec(rec_03, prec_03, 'Thrs: 0.3', color=(1, 0, 1))
        
    prec_05, rec_05 = eval_plot.sort_prec_rec(data['evaluation']['prec']['0.5'], data['evaluation']['rec']['0.5'])
    eval_plot.plot_prec_rec(rec_05, prec_05, 'Thrs: 0.5', color=(0, 1, 0))
    
    # build smoothed precision-recall curves
    # smooth PR-curve as described in http://cs229.stanford.edu/section/evaluation_metrics.pdf#
    smoothed_prec_03 = [max(prec_03[idx:]) for idx, _ in enumerate(prec_03)]
    smoothed_prec_05 = [max(prec_05[idx:]) for idx, _ in enumerate(prec_05)]
    
    eval_plot.plot_prec_rec(rec_03, smoothed_prec_03, 'Thrs_smoothed: 0.3', color=(1, 0, 1), linestyle="--")
    eval_plot.plot_prec_rec(rec_05, smoothed_prec_05, 'Thrs_smoothed: 0.5', color=(0, 1, 0), linestyle="--")
    
    #auc_03 = np.trapz(smoothed_prec_03, rec_03)
    #auc_05 = np.trapz(smoothed_prec_05, rec_05)

    auc_03 = np.trapz(prec_03, rec_03)
    auc_05 = np.trapz(prec_05, rec_05)
    
    title = "conf. stride: {:0.2f}, max. conf.: {:0.4f}, AUC_03: {:0.2f}, AUC_05: {:0.2f}".\
                                                format(data['confidence stride'], data['max_conf'], auc_03, auc_05)

    eval_plot.config(data['name'], title=title)
 
    
    eval_plot.savefig(os.path.join(filepath, (data['name'] + '.svg')))
    if vis:
        eval_plot.show()
    else:
        eval_plot.clearfig()

    # build mean iou - recall curves        
    postfix = ['', '_low', '_mid', '_high']
    
    for p in postfix:        
        eval_plot.plot_prec_rec(data['evaluation']['rec' + p]['0.3'], data['evaluation']['m_iou' + p]['0.3'], 'halt au', color=(1, 0, 1))
                
        eval_plot.plot_prec_rec(data['evaluation']['rec' + p]['0.5'], data['evaluation']['m_iou' + p]['0.5'], 'halt au', color=(0, 1, 0))
        
        title = "conf. stride: {:0.2f}, max. conf.: {:0.4f},\nAUC_03: {:0.2f}, AUC_05: {:0.2f}".\
                                                    format(data['confidence stride'], data['max_conf'], auc_03, auc_05)
               
        eval_plot.config(data['name'], title=title)
        
        eval_plot.savefig(os.path.join(filepath, 'm_iou' + p + '.svg'))
        if vis:
            eval_plot.show()
        else:
            eval_plot.clearfig()
    
    classes = ["Background", "Ferry", "Buoy", "Vessel/ship", "Speed boat", "Boat", "Kayak", "Sail boat", "Swimming person", "Flying bird/plane", "Other"]        
    
    num_classes = data['evaluation']['conf_mat']["0.3"][0][1]
    cm = np.array(data['evaluation']['conf_mat']["0.3"][0][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_03_05.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.3"][1][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_03_075.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.5"][0][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_05_05.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.5"][1][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_05_075.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)