Example #1
def plot_data(pickle_name, pause=False, basename=None, roi=None, cycles=None):
  # Read a pickle file created by simulate and create plots.  basename is the directory
  # plus optional characters to put in front of plot names.
  # cycles is a (from, to) tuple of the cycle numbers to plot, inclusive, or a single int.
  with open(pickle_name, 'rb') as fp:
    data = pickle.load(fp)
    plots.plot(data, roi, pause=pause, out_place=basename, cycles=cycles)
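A hypothetical call, only to illustrate how basename and cycles are meant to be used (the pickle path and prefix below are made up):

# Hypothetical usage of plot_data(); the paths are illustrative only.
plot_data('out/run3-sim.pkl',
          pause=True,              # keep the figures open after plotting
          basename='out/run3-',    # plots are written as out/run3-<plot name>
          cycles=(0, 2))           # plot cycles 0 through 2, inclusive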
Example #2
def simulate_and_plot(override_param, do_plots=False, pause=False,
                      outdir=None, params_file='params', verbose=False, roi=None):
  data, ret, param, pickle_name, out_place = simulate(override_param, outdir=outdir,
                                                      params_file=params_file, verbose=verbose)
  if do_plots:
    plots.plot(data, roi, pause=pause, out_place=out_place)
  return ret, param
Example #3
    def _save_metric(self, metric, metric_name='loss', y_label='Test Error'):
        c_name = metric_name+'-'+self.name
        print_metric_mean_and_std(metric, c_name)
        if self.do_plot and (self.save_dir is not None or self.show_plot):
            plot(metric, None, None, None, None, None,
                 '', y_label, c_name, self.save_dir, self.show_plot, self.name)

        if self.save_dir is not None:
            save_nparray(metric, c_name, self.save_dir)
Example #4
def loop(robot, input_file, comparison_file, plot_name):
    output_file = "analytic_outputs/" + plot_name + ".csv"
    outputFile = open(output_file, "w")
    """column_headers = np.array(['time', 'lidar_F', 'lidar_R',
                               'gyro', 'compass_x', 'compass_y'])
    np.savetxt(OUTPUT_FILE, column_headers)"""

    w = robot.width
    l = robot.length
    xOffset = robot.S[0] - (l // 2)
    yOffset = robot.S[1] - (w // 2)

    #pygame.init()
    #screen = pygame.display.set_mode((robot.room_width, robot.room_length))
    with open(input_file) as csvFile:
        csvReader = csv.reader(csvFile, delimiter=',')
        inputs = list(csvReader)

        time = np.arange(0, 20.005, 0.005)
        lidar = np.zeros((len(time), 2))
        gyro = np.zeros((len(time), 1))
        compass = np.zeros((len(time), 2))
        position = np.zeros((len(time), 3))

        for i in range(len(time)):
            try:
                robot.state_update(inputs[i])
            except IndexError:
                break
            data = robot.get_observation()
            lidar[i] = data[:2]
            gyro[i] = data[2]
            compass[i] = np.array(data[3:])
            position[i] = robot.S.reshape((3,))

            """# draw
            screen.fill((0, 0, 0))
            angle = robot.S[2] * 180 / np.pi
            surf = pygame.Surface((l, w)).convert_alpha()
            surf.fill((0, 128, 255))
            x = xOffset + robot.S[0]
            y = yOffset + robot.S[1]
            blitRotate(screen, surf, (x, y), (l // 2, w // 2), -angle)
            pygame.display.update() """

        output_matrix = time
        output_matrix = np.column_stack((output_matrix, lidar))
        output_matrix = np.column_stack((output_matrix, gyro))
        output_matrix = np.column_stack((output_matrix, compass))
        output_matrix = np.column_stack((output_matrix, position))
        np.savetxt(output_file, output_matrix, delimiter=' ', fmt='%.4f')

        plot(output_file, comparison_file, plot_name)
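The saved matrix stacks the columns in the order built above: time, the two lidar channels, gyro, the two compass channels, and the three components of robot.S. A minimal sketch of reading it back, assuming only NumPy and an illustrative file name:

import numpy as np

# Column layout follows the column_stack order in loop():
# 0: time, 1-2: lidar_F/lidar_R, 3: gyro, 4-5: compass_x/compass_y, 6-8: robot pose (robot.S)
out = np.loadtxt("analytic_outputs/example.csv", delimiter=' ')
time, lidar, gyro = out[:, 0], out[:, 1:3], out[:, 3]
compass, position = out[:, 4:6], out[:, 6:9]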
Example #5
    def on_epoch_end(self, epoch, logs={}):
        if epoch % self.period == 0 and epoch != 0:
            for file in os.listdir(self.path):
                os.remove(os.path.join(self.path, file))
            used_metrics = [
                metric for metric in self.model.history.params['metrics']
                if 'val' not in metric
            ]
            for label in used_metrics:
                plot(history=self.model.history, path=self.path, label=label)
Example #6
def main(restraints, x_axis, y_axis, one_graph=False):
    df, header = load()
    filt = filter_data(df, header, restraints)
    xs, ys, hs = extract_plotable_data(filt, header, x_axis, y_axis)

    if not one_graph:
        for x, y, h in zip(xs, ys, hs):
            plot(x, y, x_axis, y_axis, h, header, one_graph)
    else:
        plot(xs, ys, x_axis, y_axis, hs, header, one_graph)

    return None
Example #7
def main():
    # check if user args are valid.
    user_args = check_user_args(arguments, model_list)
    # read data.
    data = read_data.read_data(user_args[2], user_args[3])
    # call to model.
    model = model_handler.model_caller(user_args[1])
    # fit model to data.
    result = fits.fit(data, model[0])
    # plot of fitted model.
    plots.plot(data, result[0], model[1], user_args[2], show=user_args[0])
    # report of fit results.
    logs.log(user_args[2], model[1], result, show=user_args[0])
Example #8
def main():
    start = time.time()

    pool = Pool()
    pool.map(worker, range(len(eats)))
    pool.close()
    pool.join()

    plot(plots, xc, Vc, zc, DEL, eats, nKGB, 'n', DATA, zpoints, fpoints,
         sig80)
    end = time.time()

    print('time taken = {0}'.format(end - start))
Example #9
def launch(filepath,
           k=55,
           include_seq=True,
           threshold=0.3,
           format='pdf',
           output='out',
           show=False,
           pause=1.2,
           fix_steps=False,
           make_gif=False):
    """
    Launch function to create graph representation of assembly, collapse it, remove low covered vertices
    :param filepath: str - path to fasta file
    :param k: int - kmer size, recommended to be odd
    :param include_seq: boolean - whether to display vertex and edge sequences
    :param threshold: float - coefficient which determines threshold for coverage for vertex removing
    :param format: str - format of output images - pdf, svg, png
    :param output: str - file name of output, 'original' and 'collapsed' will be appended to it
    :param show: boolean - whether to display fiery slide show
    :param pause: float - pause between each slide
    :param fix_steps: boolean - whether to create image of graph state at every stage
    :return:
    """
    # Load file
    a = Graph(filepath, k)

    # Populate graph and cover edges
    a.fragmentate()
    a.cover_edges()
    a.edge_coverage()

    # Collapse graph and remove low covered vertices
    a.collapse_filter(threshold,
                      fix_steps=fix_steps,
                      show=show,
                      pause=pause,
                      format=format,
                      output=output)

    # Compute edge coverage
    a.edge_coverage()
    a.extract(f'{output}/out')

    # Create plot of collapsed graph if it wasn't created
    if not fix_steps:
        plot(a, f'{output}/picts/collapsed', include_seq, format, show)

    # Create animation from obtained images
    if make_gif:
        animate(f'{output}/picts', format, pause)
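A hypothetical invocation of launch() (the fasta path is made up) showing the options that drive the plots and the gif:

# Hypothetical usage; 'reads.fasta' is an illustrative path.
launch('reads.fasta',
       k=55,               # odd k-mer size
       threshold=0.3,      # coverage cutoff for vertex removal
       format='png',
       output='assembly',  # images end up under assembly/picts/, sequences under assembly/out
       fix_steps=True,     # snapshot the graph at every collapse step
       make_gif=True)      # stitch the snapshots into an animation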
Example #10
def selection_sort(arr, type_plot):

    # Run through all data elements
    for i in range(len(arr)):
        # Find the minimum element
        min_idx = i
        for j in range(i + 1, len(arr)):
            if arr[min_idx] > arr[j]:
                min_idx = j

        # Swap the found minimum element with
        # the first element
        arr[i], arr[min_idx] = arr[min_idx], arr[i]

        # Create a plot of the current state with the selected plot type
        plot(arr=arr, fallow_point=min_idx, type_plot=type_plot, title='Selection Sort')
Example #11
def bubble_sort(arr, type_plot):
    # Traverse through all array elements
    for j in range(len(arr)):
        # Compare adjacent elements
        for i in range(len(arr) - 1):
            # Swap if the element found is greater
            # than the next element
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

                # Create a plot of the current state with the selected plot type
                plot(arr=arr,
                     fallow_point=i + 1,
                     type_plot=type_plot,
                     title='Bubble Sort')
Example #12
    def on_epoch_end(self, epoch, logs={}):
        if epoch % self.period == 0 and epoch != 0:
            for file in os.listdir(self.path):
                os.remove(os.path.join(self.path, file))
            used_metrics = [
                metric for metric in self.model.history.params['metrics']
                if 'val' not in metric
            ]
            for label in used_metrics:
                plot(history=self.model.history, path=self.path, label=label)
            evaluate_model(model=self.model,
                           path=self.path,
                           dataset=self.validation_data,
                           split='Validation',
                           target_names=self.validation_data.classes)
Example #13
def hold_out(training_data, results):
    print 'Total Data : ' + str(len(training_data))
    print sum(results)
    test_data = training_data[-int(0.2 * len(training_data)):]
    training_data = training_data[:-int(0.2 * len(training_data))]
    results_training = results[:-int(0.2 * len(results))]
    results_test = results[-int(0.2 * len(results)):]
    zeros = 0
    ones = 0
    # unique(training_data,test_data)
    print 'Training Items : ' + str(len(training_data))
    print 'Test Items : ' + str(len(test_data))
    training_scores_logreg = []
    testing_scores_logreg = []
    training_scores_svm = []
    testing_scores_svm = []
    training_scores_rf = []
    testing_scores_rf = []
    training_scores_bnb = []
    testing_scores_bnb = []
    #training_scores_knn = []
    #testing_scores_knn = []
    for i in range(1, len(training_data) / 1000):
        print 'Iteration : ' + str(i)
        print len(testing_scores_logreg)
        logreg.fit(training_data[:i * 1000], results_training[:i * 1000])
        testing_scores_logreg.append(logreg.score(test_data, results_test))
        training_scores_logreg.append(
            logreg.score(training_data, results_training))
        clf2.fit(training_data[:i * 1000], results_training[:i * 1000])
        testing_scores_svm.append(clf2.score(test_data, results_test))
        training_scores_svm.append(clf2.score(training_data, results_training))
        rf.fit(training_data[:i * 1000], results_training[:i * 1000])
        testing_scores_rf.append(rf.score(test_data, results_test))
        training_scores_rf.append(rf.score(training_data, results_training))
        bnb.fit(training_data[:i * 1000], results_training[:i * 1000])
        testing_scores_bnb.append(bnb.score(test_data, results_test))
        training_scores_bnb.append(bnb.score(training_data, results_training))
        #logreg.fit(training_data[:i*1000],results_training[:i*1000])
        #testing_scores_logreg.append(logreg.score(test_data,results_test))
        #training_scores_logreg.append(logreg.score(training_data,results_training))
    plots.plot(training_scores_logreg, testing_scores_logreg,
               training_scores_svm, testing_scores_svm, training_scores_rf,
               testing_scores_rf, training_scores_bnb, testing_scores_bnb)
Example #14
def train(audio_path, plot_matrix=False):

    x_data, y_data = get_set(26, 9, audio_path)
    x_data = keras.preprocessing.sequence.pad_sequences(x_data, maxlen=100)

    x_train, x_test, Y_train, Y_test = train_test_split(x_data,
                                                        y_data,
                                                        test_size=0.1,
                                                        random_state=42)

    y_train = keras.utils.to_categorical(Y_train, 16)
    y_test = keras.utils.to_categorical(Y_test, 16)

    model = getModel((x_train.shape[1], x_train.shape[2]), y_train.shape[1])

    history = model.fit(x_train,
                        y_train,
                        batch_size=10,
                        epochs=137,
                        verbose=1,
                        validation_data=(x_test, y_test))
    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    model.save(model_name)

    if plot_matrix:
        plot(x_test, Y_test, model_name)
Example #16
def parflow():
    INPUT_DIR = os.path.join(os.path.dirname(__file__), '..', 'parflow',
                             'inputs')
    FORCING_DIR = os.path.join(os.path.dirname(__file__), '..', 'parflow',
                               'forcing')
    OUTPUT_DIR = os.path.join(os.getenv('HOME'), 'RESULTS/output')
    PNG_PATH = os.path.join(os.getenv('HOME'), 'RESULTS/plot.png')

    # copy input files directly into the base output folder,
    # because that's where Parflow expects them to be.
    shutil.copytree(INPUT_DIR, OUTPUT_DIR, dirs_exist_ok=True)

    run.Solver.CLM.CLMFileDir = OUTPUT_DIR
    run.Solver.CLM.MetFilePath = FORCING_DIR
    run.run(working_directory=OUTPUT_DIR)
    plot(OUTPUT_DIR, run.get_name(), PNG_PATH, title=title)

    img = open(PNG_PATH, 'rb').read()
    return render_template('result.html',
                           img=base64.encodebytes(img).decode('ascii'))
Example #17
def partition(arr, left, right, type_plot):
    i = (left - 1)  # smaller element
    pivot = arr[right]  # pivot

    for j in range(left, right):

        # Swap if current element is smaller than or
        # equal to pivot
        if arr[j] <= pivot:
            i = i + 1
            arr[i], arr[j] = arr[j], arr[i]

            # Create plot with arr and pivot
            plot(arr=arr,
                 fallow_point=pivot,
                 type_plot=type_plot,
                 title='Quick Sort ')

    arr[i + 1], arr[right] = arr[right], arr[i + 1]
    return i + 1
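partition() above is only the partitioning step; a minimal recursive driver of the usual shape, sketched here as an assumption about how the original module calls it (the name quick_sort is not shown in the snippet):

def quick_sort(arr, left, right, type_plot):
    # Sort arr[left..right] in place, plotting each partition step via partition().
    if left < right:
        p = partition(arr, left, right, type_plot)
        quick_sort(arr, left, p - 1, type_plot)
        quick_sort(arr, p + 1, right, type_plot)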
Example #18
    def plot_to_file(self):
        file = filenames['basic_plot'] % curr
        data = self.data['input']['processed']

        args = {}
        args['filename'] = file
        args['data'] = data
        args['y_label'] = curr + "/PLN"
        args['x_label'] = 'czas [data]'

        return plot(args)
Example #19
    def plot_to_file_log_return(self):
        file = filenames['basic_log_return_plot'] % curr
        data = self.data['input']['log_returns']

        args = {}
        args['filename'] = file
        args['data'] = data
        args['y_label'] = "logarytmiczna stopa zwrotu, średnia %d dni" % (
            2 * moving_average_delta + 1)
        args['x_label'] = 'czas [data]'

        return plot(args)
Example #20
    def plot_data_with_equation(self, file_name, equation):
        fitted_signal = self.fourier_analyse_inverse(equation)

        args = {}
        args['data'] = [
            self.data['input']['processed'][0],  # time
            self.data['input']['processed'][1],  # real values
            fitted_signal['signal_abs']  # fitted values
        ]
        args['filename'] = file_name

        return plot(args)
Example #21
def simulationAnnealing(NT, snapNT, spins, J, N, M, kB, gamma, T0, Tsteps):
    #initialization for first simulation
    spins = spinsInit(N, M)

    #keeping track of magnetization
    globals.magnetizationInit()
    globals.magnetizationHistoryInit()
    globals.magnetization = spins.sum()

    #calculating the decrease factor of temperature
    decreasFactor = (0.05 / T0)**(1 / Tsteps)

    #Array of slowly decreasing temperatures
    iterationArray = np.zeros(Tsteps)
    for i in range(len(iterationArray)):
        iterationArray[i] = T0 * decreasFactor**i

    #Annealing
    for t in iterationArray:
        beta = 1 / t / kB
        simulation(NT, snapNT, spins, J, N, M, beta, gamma)
        plots.plot(spins, gamma, 1, 1)
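A short note on the cooling schedule above: the decrease factor is

f = \left(\tfrac{0.05}{T_0}\right)^{1/T_{\mathrm{steps}}}, \qquad T_i = T_0\, f^{\,i},

so T_0 f^{T_{\mathrm{steps}}} = 0.05; the geometric schedule decays from T_0 down towards 0.05 (the last of the Tsteps entries is 0.05/f, just above that value).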
Example #22
def heapify(arr, n, i, type_plot):
    largest = i  # Initialize largest as root
    l = 2 * i + 1  # left = 2*i + 1
    r = 2 * i + 2  # right = 2*i + 2

    # See if left child of root exists and is
    # greater than root
    if l < n and arr[i] < arr[l]:
        largest = l

    # See if right child of root exists and is
    # greater than root
    if r < n and arr[largest] < arr[r]:
        largest = r

    # Change root, if needed
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]  # swap

        # Heapify the root.
        heapify(arr, n, largest,  type_plot)

        plot(arr=arr, fallow_point=largest, type_plot=type_plot, title='Heap Sort')
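heapify() above only sifts a single subtree; a minimal heap-sort driver of the standard shape, sketched as an assumption about how the original module uses it (the name heap_sort is not shown in the snippet):

def heap_sort(arr, type_plot):
    n = len(arr)
    # Build a max-heap, then repeatedly move the root to the end of the array.
    for i in range(n // 2 - 1, -1, -1):
        heapify(arr, n, i, type_plot)
    for i in range(n - 1, 0, -1):
        arr[0], arr[i] = arr[i], arr[0]
        heapify(arr, i, 0, type_plot)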
Example #23
    def plot_freq_basic_analysis(self):
        omegas = self.freq_domain['omegas']
        dft_abs = self.freq_domain['dft_abs']

        points = self.find_valuable_maxims()

        file = filenames['basic_fourier_analysis_freq'] % curr

        args = {}
        args['filename'] = file
        args['data'] = [omegas, dft_abs]
        args['highlight_data'] = points
        args['y_label'] = 'amplituda'
        args['x_label'] = 'częstość [1/dzień]'
        return plot(args)
Example #24
    def plot_returns(self):
        arguments = {}

        timing = self.data['fourier_fit']['returns'][0]
        values1 = self.data['input']['log_returns'][1]
        values2 = self.data['fourier_fit']['returns'][1]
        arguments['data'] = [timing, values1, values2]

        name = filenames['both_log_return_plot'] % curr
        arguments['filename'] = name

        arguments['y_label'] = "logarytmiczna stopa zwrotu, średnia %d dni" % (
            2 * moving_average_delta + 1)
        arguments['x_label'] = "czas [data]"

        return plot(arguments)
Example #25
    def plot_basic_data_with_dff_fit(self):
        self.find_valuable_maxims(True)

        equation = self.translate_maxims_to_equation()
        fitted_signal = self.fourier_analyse_inverse(equation)

        file = filenames['basic_fourier_analysis_fit'] % curr

        args = {}
        args['data'] = [
            self.data['input']['processed'][0],  # time
            self.data['input']['processed'][1],  # real values
            fitted_signal['signal_abs']  # fitted values
        ]
        args['filename'] = file

        return plot(args)
Example #26
def main_page():
    data_list = [cases[place] for place in current_places]
    if request.method == 'POST':
        query = request.form.get('place')
        if query in cases.keys() and query not in current_places:
            current_places.append(query)
        function = request.form.get('function')
        try:
            data_list = [eval(function) for place in current_places]
        except:
            data_list = [cases[place] for place in current_places]
    script, div, js_resources, css_resources = plot(dates, data_list,
                                                    current_places)
    return render_template(
        'index.html',
        current_places=current_places,
        places=filter(is_country, list(cases.keys())),
        plot_script=script,
        plot_div=div,
        js_resources=js_resources,
        css_resources=css_resources,
    )
Example #27
from stimuli import visual_stimulus
from plots import plot

try:
    Runcase=int(sys.argv[-1])
except ValueError:
    print('Provide either an integer, the key "all", or a specific key (see run.py)')
    Runcase=str(sys.argv[-1])


np.random.seed(1)
Nshow = 8

if (Runcase==19) or (Runcase=='model-doc') or (Runcase=='all'):
    model = earlyVis_model(from_file='data/dense-noise.npz')
    ps = plot(model=model)
    fig = ps.protocol_plot(cell_plot=np.random.choice(np.arange(model.Ncells), Nshow, replace=False))
    fig.savefig('docs/figs/response-dense-noise.png')
    ps.show()
    
if (Runcase==18) or (Runcase=='model-doc') or (Runcase=='all'):
    model = earlyVis_model(from_file='data/sparse-noise.npz')
    ps = plot(model=model)
    fig = ps.protocol_plot(cell_plot=np.random.choice(np.arange(model.Ncells), Nshow, replace=False))
    fig.savefig('docs/figs/response-sparse-noise.png')
    ps.show()
    
if (Runcase==15) or (Runcase=='model-doc') or (Runcase=='all'):
    model = earlyVis_model(from_file='data/static-grating.npz')
    ps = plot(model=model)
    fig = ps.protocol_plot(cell_plot=np.random.choice(np.arange(model.Ncells), Nshow, replace=False))
Example #28
ns = 2 # number of sweeps
dE = 0.0001 # V, potential increment. This value has to be small for BI to approximate the circuit properly

t, E = wf.sweep(Eini=Eini, Efin=Efin, dE=dE, sr=sr, ns=ns) # Creates waveform

g0 = Q0/F # mol/cm2, maximum coverage for 1 monolayer
eps = n*FRT*(E-E0)
kf = ks*np.exp(alpha*eps)
kb = ks*np.exp(-(1-alpha)*eps)

# Simulation parameters
nt = np.size(t)
dt = t[1]

Th = np.ones(nt)

#%% Simulation
for j in range(1,nt):
    
    # Backwards implicit:
    Th[j] = (Th[j-1] + dt*kb[j-1])/(1 + dt*(kf[j-1]+kb[j-1]))

# Denormalisation
i = -n*F*Ageo*g0*(kb - (kf+kb)*Th)
Q = g0*F*(1-Th)
end = time.time()
print(end-start)

#%% Plot
p.plot(E, i, "$E$ / V", "$i$ / A")
p.plot(E, Q*1e6, "$E$ / V", r"$Q$ / $\mu$C cm$^{-2}$")
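A sketch of the algebra behind the backwards-implicit step above, assuming the surface-coverage ODE that the update discretises:

\frac{d\theta}{dt} = k_b\,(1-\theta) - k_f\,\theta
\quad\Rightarrow\quad
\frac{\theta_j - \theta_{j-1}}{\Delta t} = k_b - (k_f + k_b)\,\theta_j
\quad\Rightarrow\quad
\theta_j = \frac{\theta_{j-1} + \Delta t\, k_b}{1 + \Delta t\,(k_f + k_b)},

which is exactly Th[j] = (Th[j-1] + dt*kb[j-1])/(1 + dt*(kf[j-1]+kb[j-1])) with the rate constants evaluated at the previous potential point; the denormalised current i = -n F A_{geo} \Gamma_0\,(k_b - (k_f+k_b)\theta) is then just -n F A_{geo} \Gamma_0\, d\theta/dt.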
Example #29
X = np.linspace(0, Xmax, nX)  # Discretisation of distance
eps = (E - E0) * n * FRT  # adimensional potential waveform
delta = np.sqrt(D * t[-1])  # cm, diffusion layer thickness
K0 = ks * delta / D  # Normalised standard rate constant

#%% Simulation
for k in range(1, nT):
    # Boundary condition, Butler-Volmer:
    C[k, 0] = (C[k - 1, 1] +
               dX * K0 * np.exp(-alpha * eps[k])) / (1 + dX * K0 * (np.exp(
                   (1 - alpha) * eps[k]) + np.exp(-alpha * eps[k])))

    # Solving finite differences:
    for j in range(1, nX - 1):
        C[k, j] = C[k - 1, j] + lamb * (C[k - 1, j + 1] - 2 * C[k - 1, j] +
                                        C[k - 1, j - 1])

# Denormalising:
i = n * F * Ageo * D * cB * (-C[:, 2] + 4 * C[:, 1] - 3 * C[:, 0]) / (2 * dX *
                                                                      delta)
cR = C * cB
cO = cB - cR
x = X * delta
end = time.time()
print(end - start)

#%% Plot
p.plot(E, i, "$E$ / V", "$i$ / A")
p.plot2(x, cR[-1, :] * 1e6, x, cO[-1, :] * 1e6, "[R]", "[O]", "x / cm",
        "c($t_{end}$,$x$=0) / mM")
Example #30
    epilog='''
''')

parser.add_argument("files", nargs='+',type=str, help="pickle files to plot", default=None)
parser.add_argument('-c', '--cycles', type=parseIntRange, help="cycle or range of cycles to plot, .e.g. 4 or 0-2", default=None)
parser.add_argument('-n', '--nopause', action='store_true', help="do not pause after generating plots")
options = parser.parse_args()

print('git revision:{}'.format(util.get_git_commit()))
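parseIntRange is used as the argparse type above but is not shown in this snippet; a minimal sketch of what it presumably does (accept either a single cycle like '4' or an inclusive range like '0-2'):

def parseIntRange(text):
    # Hypothetical sketch: '4' -> 4, '0-2' -> (0, 2); int() raises ValueError otherwise,
    # which argparse turns into a usage error.
    if '-' in text:
        lo, hi = text.split('-', 1)
        return (int(lo), int(hi))
    return int(text)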

  
class AttrDict(dict):
  def __init__(self, *args, **kwargs):
    super(AttrDict, self).__init__(*args, **kwargs)
    self.__dict__ = self

for file in options.files:
  with open(file, 'rb') as fp:
    data=pickle.load(fp)
  print('loaded {}'.format(file))
  print('cycles {}'.format(options.cycles))
  head,tail=os.path.split(file)
  print('head={} tail={}'.format(head,tail))
  #take the part of tail up to the first dash and append '-replot' for the output prefix
  parts=tail.split('-')
  out_place=os.path.join(head,parts[0]+'-replot')
  print('out_place={}'.format(out_place))
  plots.plot(data, out_place=out_place, pause=not options.nopause,
             cycles=options.cycles, text=parts[0])

Example #31
            if (t>=surround_delay) and (t<=surround_delay+surround_duration) and (t>=center_delay) and (t<=center_delay+center_duration):
                self.static_center_surround_grating(i+self.it0, **args)
            elif (t>=surround_delay) and (t<=surround_delay+surround_duration):
                self.static_surround_grating(i+self.it0, **args)
            elif (t>=center_delay) and (t<=center_delay+center_duration):
                self.static_center_surround_grating(i+self.it0, **args)
                

        

if __name__=='__main__':

    
    from plots import plot
    
    stim = visual_stimulus(sys.argv[-1])
    
    stim_plot= plot(stimulus=stim, graph_env_key='visual_stim')
    
    stim_plot.screen_movie(stim)
    # if stim.stimulus_params['static']:
    #     stim_plot.screen_plot(stim.full_array[0,:,:])
    # else:
    #     stim_plot.screen_movie(stim)
    
    stim_plot.show()
    


    
Example #32
def hold_out_training(training_data,results):
	print 'Length : ' + str(len(training_data))
	test_data = training_data[-int(0.3*len(training_data)):]
	training_data = training_data[:-int(0.3*len(training_data))]
	results_training = results[:-int(0.3*len(results))]
	results_test = results[-int(0.3*len(results)):]
	rf.fit(training_data,results_training)
	print rf.score(test_data,results_test)
	'''
	pre_shuffle_stuff = []
	counter = 0
	for item in results_training:
		item = item.append(results_training[counter])		
	accuracies = []
	#shuffle(training_data)
	'''
	accuracies_svm = []
	accuracies_rf = []
	accuracies_knn = []
	accuracies_svm_rbf = []
	for j in range(10,len(training_data),1):
		correct = 0
		wrong = 0
		curr_training_data = training_data[:j]
		curr_results = results_training[:j]
		rf.fit(curr_training_data,curr_results)	
		rf.score(test_data,results_test)
		y_true = []
		y_pred = []
		for i in range(len(test_data)):
			result = rf.predict(test_data[i])
			
			if result[0] == results_test[i]:
				correct = correct + 1
				#print 'Correct : ' + str(result)
			else:
				wrong  = wrong + 1
				#iprint 'Wrong : ' + str(result)
		print correct,wrong
		accuracy = (float(correct))/(float(correct) + float(wrong))
		accuracies_rf.append(accuracy)
		print 'Accuracy : ' + str(accuracy)
		knn.fit(curr_training_data,curr_results)
		knn.score(test_data,results_test)
		correct = 0
		wrong = 0
		for i in range(len(test_data)):
			result = knn.predict(test_data[i])
			
			if result[0] == results_test[i]:
				correct = correct + 1
				#print 'Correct : ' + str(result)
			else:
				wrong  = wrong + 1
				#iprint 'Wrong : ' + str(result)
		print correct,wrong
		accuracy = (float(correct))/(float(correct) + float(wrong))
		accuracies_knn.append(accuracy)
		print 'Accuracy : ' + str(accuracy)
		clf2.fit(curr_training_data,curr_results)
		clf2.score(test_data,results_test)
		correct = 0
		wrong = 0
		for i in range(len(test_data)):
			result = clf2.predict(test_data[i])
			
			if result[0] == results_test[i]:
				correct = correct + 1
				#print 'Correct : ' + str(result)
			else:
				wrong  = wrong + 1
				#iprint 'Wrong : ' + str(result)
		print correct,wrong
		accuracy = (float(correct))/(float(correct) + float(wrong))
		accuracies_svm.append(accuracy)
		print 'Accuracy : ' + str(accuracy)
		clf.fit(curr_training_data,curr_results)
		clf.score(test_data,results_test)
		correct = 0
		wrong = 0
		for i in range(len(test_data)):
			result = clf.predict(test_data[i])
			
			if result[0] == results_test[i]:
				correct = correct + 1
				#print 'Correct : ' + str(result)
			else:
				wrong  = wrong + 1
				#iprint 'Wrong : ' + str(result)
		print correct,wrong
		accuracy = (float(correct))/(float(correct) + float(wrong))
		accuracies_svm_rbf.append(accuracy)
		print 'Accuracy : ' + str(accuracy)
	plots.plot(accuracies_rf,accuracies_knn,accuracies_svm,accuracies_svm_rbf,training_data)