def simulate(C, Gc, ggap, gsyn, n_timesteps):
    """
    Runs a standard simulation of NeuralModel with the given parameter values
    Note this function does not specify seed, it lets the model use a random seed
    Args:
        C - cell membrane capacitance pF / 100 = arb
        Gc - cell membrane conductance pS / 100 = arb
        ggap - global gap junction conductance pS / 100 = arb
        gsyn - global synaptic conductance pS / 100 = arb
        n_timesteps - how long to run the model for
    Returns:
        fwd_dynamics (n_timesteps - 300 x n_neurons) - matrix of normalized membrane potential time series for
            all neurons.
    """
    # initialize model
    model = NeuralModel(neuron_metadata_collection, C, Gc, ggap, gsyn)
    model.set_current_injection("AVBL", 2.3)
    model.set_current_injection("AVBR", 2.3)
    model.set_current_injection("PLML", 1.4)
    model.set_current_injection("PLMR", 1.4)
    model.init()

    # simulate
    (v_mat, s_mat, v_normalized_mat) = model.run(n_timesteps)
    # Drop the first 300 timesteps, before the oscillatory dynamics stabilize,
    # to match the shape documented in the docstring.
    return v_normalized_mat[300:, :]
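A minimal usage sketch; the parameter values here are illustrative placeholders, not tuned values from the original:

fwd_dynamics = simulate(C=0.015, Gc=0.1, ggap=1.0, gsyn=1.0, n_timesteps=2700)
print(fwd_dynamics.shape)  # (2700 - 300, n_neurons)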
Example 2
    def guess_number(self, kind=2, confidence_threshold=0):
        # Query the shared NeuralModel singleton for a prediction on the
        # current image (kind and confidence_threshold are unused here).
        guy = NeuralModel.instance()
        prediction, number, accuracy = guy.guess(self.image)
        self.accuracy = accuracy
        self.number = number

        return self.number
Example 3
    def guess_number(self, confidence_threshold=0):
        # Keeps a buffer of recent guesses and only queries the model every
        # self.maxtimer frames (e.g. predicts once every 10 frames).
        self.timer += 1
        if self.timer >= self.maxtimer:
            self.timer = 0

            if self.image is None:
                self.prev_guesses.appendleft(
                    np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
            else:
                neuron = NeuralModel.instance()
                prediction, number, accuracy = neuron.guess(self.image)
                self.accuracy = accuracy
                self.prev_guesses.appendleft(np.array(prediction))

        # Average the buffered prediction vectors and take the most likely digit.
        m = np.mean(self.prev_guesses, axis=0)
        number = np.argmax(m, axis=0)

        # Only update the stored number when the averaged confidence clears
        # the threshold; otherwise keep the previous value.
        if m[number] > confidence_threshold:
            self.number = number

        return self.number
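The excerpt relies on state it never initializes. A minimal sketch of the assumed setup, using a fixed-length collections.deque so old guesses age out; the class name, buffer length, and frame interval are guesses:

from collections import deque
import numpy as np

class DigitGuesser:  # hypothetical host class for guess_number above
    def __init__(self, buffer_len=5, maxtimer=10):
        # Fixed-length buffer of past prediction vectors, pre-filled with
        # "blank" guesses (index 0 hot, matching the no-image case above).
        self.prev_guesses = deque(
            [np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])] * buffer_len,
            maxlen=buffer_len)
        self.timer = 0
        self.maxtimer = maxtimer
        self.image = None
        self.number = 0
        self.accuracy = 0.0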
Example 4
def test(args):
    """
    test models for each label
    @param args (dict): command line args
    """
    entail_pairs, neutral_pairs, contradict_pairs = extract_pair_corpus(args['--test-file'])
    labels = ['entail', 'neutral', 'contradict']
    label_to_data = {'entail': entail_pairs, 'neutral': neutral_pairs,
                     'contradict': contradict_pairs}
    for label in labels:
        data = label_to_data[label]
        
        save_gen_hyp_path = label + '_test' + args['--save-generated-hyp-to']
        prems = [prem for (prem, hyp) in data]
        model_path = label.upper() + '_MODEL'
        model = NeuralModel.load(args[model_path]) 
        model = model.to(device)
        gen_hyps, sim_score, bleu_score = evaluate(args, data, model)
        save_generated_hyps(save_gen_hyp_path, prems, gen_hyps)
        print('%s sim score = %.2f, BLEU score = %.2f' % (label, sim_score, bleu_score))
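A hypothetical invocation; the dict keys are exactly the ones test() looks up, while the file paths are placeholders:

args = {
    '--test-file': 'data/test_pairs.txt',        # placeholder path
    '--save-generated-hyp-to': '_hyps.txt',      # suffix used in save_gen_hyp_path
    'ENTAIL_MODEL': 'models/entail.bin',         # placeholder paths; keys follow
    'NEUTRAL_MODEL': 'models/neutral.bin',       # label.upper() + '_MODEL' above
    'CONTRADICT_MODEL': 'models/contradict.bin',
}
test(args)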
Example 5
"""
Reproduces the milestone oscillation analysis. Outputs:
- results/milestone_oscillation_motorneurons.png
- results/milestone_oscillation_svdist.png
- results/milestone_oscillation_trajectory_2sv.png
"""

from util.neuron_metadata import *
from util.plot_util import *
import numpy as np
import pandas as pd
from neural_model import NeuralModel
from sklearn.decomposition import PCA
from mpl_toolkits import mplot3d

neuron_metadata_collection = NeuronMetadataCollection.load_from_chem_json(
    'data/chem.json')

model = NeuralModel(neuron_metadata_collection)
model.seed = 0
model.set_current_injection("AVBL", 2.3)
model.set_current_injection("AVBR", 2.3)
model.set_current_injection("PLML", 1.4)
model.set_current_injection("PLMR", 1.4)
model.init()
(v_mat, s_mat, v_normalized_mat) = model.run(2700)
# The oscillatory dynamics don't stabilize until about timestep 300 onwards.
# Also, interactome analysis is done after the first 50 timesteps.
fwd_dynamics = v_normalized_mat[300:, :]

# Plot some motor neurons.
fig = plot_saved_dynamics(['AS01', 'DA01', 'DD01'], fwd_dynamics,
                          neuron_metadata_collection)
fig.savefig("results/milestone_oscillation_motorneurons.png")
Example 6
def train_lg_model(args, vocab, embeddings, train_data, dev_data, label):
    """
    train LG model on the specific label
    @param args (dict): command line args
    @param vocab (Vocab): Vocab class obj
    @param embeddings (torch.tensor(len(vocab), embed_dim)): pretrained word embeddings
    @param train_data (list[tuple]): list of train (prem, hyp) pairs
    @param dev_data (list[tuple]): list of dev (prem, hyp) pairs
    @param label (str): hyp label    
    """
    train_batch_size = int(args['--batch-size'])
    clip_grad = float(args['--clip-grad'])
    model_save_path = label + args['--save-model-to']

    model = NeuralModel(vocab, int(args['--embed-size']), embeddings,
                        hidden_size=int(args['--hidden-size']),
                        dropout_rate=float(args['--dropout']))
    model = model.to(device)

    init_lr = float(args['--lr'])
    optimizer = torch.optim.Adam(model.parameters(), lr=init_lr)

    total_loss = .0
    total_hyp_words = 0

    dev_prems = [prem for (prem, hyp) in dev_data]
    save_gen_hyp_path = label + args['--save-generated-hyp-to']

    hist_dev_scores = []
    patience = 0

    begin_time = time.time()
    for epoch in range(int(args['--max-epoch'])):
        for prems, hyps in batch_iter(train_data, batch_size=train_batch_size, shuffle=True):
            num_hyp_words_to_predict = sum(len(hyp[1:]) for hyp in hyps)

            optimizer.zero_grad()
            
            batch_loss = -model(prems, hyps).sum()
            loss = batch_loss / num_hyp_words_to_predict

            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
            optimizer.step()
            
            batch_losses_val = batch_loss.item()
            total_loss += batch_losses_val

            total_hyp_words += num_hyp_words_to_predict
        
        print('epoch = %d, loss = %.2f, perplexity = %.2f, time_elapsed = %.2f sec'
            % (epoch, total_loss / total_hyp_words, 2**(total_loss / total_hyp_words), time.time() - begin_time))
        #reset epoch progress vars
        total_loss = .0
        total_hyp_words = 0

        #perform validation
        dev_hyps, sim_score, bleu_score = evaluate(args, dev_data, model)
        is_better = epoch == 0 or sim_score > max(hist_dev_scores)
        hist_dev_scores.append(sim_score)

        if is_better:
            #reset patience
            patience = 0
            #save model
            model.save(model_save_path)
            #save generated hyps
            save_generated_hyps(save_gen_hyp_path, dev_prems, dev_hyps)

        else:
            patience += 1
            if patience == int(args['--patience']):
                print('finishing training: dev sim score = %.2f, BLEU score = %.2f'
                    % (sim_score, bleu_score))
                return

        print('validation: dev sim score = %.2f, BLEU score = %.2f'
            % (sim_score, bleu_score))

        # halve the lr every 2 epochs
        lr = init_lr / 2 ** (epoch // 2)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
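batch_iter is used above but not shown; a minimal sketch of the assumed helper, yielding lists of premises and hypotheses per batch (the original may additionally sort by length for padding):

import random

def batch_iter(data, batch_size, shuffle=False):
    # data: list of (prem, hyp) pairs
    indices = list(range(len(data)))
    if shuffle:
        random.shuffle(indices)
    for start in range(0, len(data), batch_size):
        batch = [data[i] for i in indices[start:start + batch_size]]
        prems = [prem for (prem, hyp) in batch]
        hyps = [hyp for (prem, hyp) in batch]
        yield prems, hyps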
Example 7
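The excerpt begins mid-setup; a sketch of the assumed preamble, with names inferred from the argument uses below (defaults are guesses):

import argparse
import logging
from multiprocessing import cpu_count

parser = argparse.ArgumentParser()
parser.add_argument("--model", choices=["GBDT", "LSTM"], required=True)
parser.add_argument("--learning-rate", type=float, default=0.1)  # used by ForestModel
parser.add_argument("--iterations", type=int, default=100)       # used by ForestModel
parser.add_argument("--convert-from")                            # input corpus path
parser.add_argument("--convert-to")                              # target corpus path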
    parser.add_argument("--thread-count", default=cpu_count()) #
    parser.add_argument("--depth", default=8) #
    parser.add_argument("--window-size", default=5) #
    parser.add_argument("--split-factor", default=0.2)
    parser.add_argument("--num_lstms", default=1) #
    parser.add_argument("--dropouts", default=[0.2], nargs='+') #
    parser.add_argument("--units", default=[512], nargs='+') #
    parser.add_argument("--output-model-name")
    parser.add_argument("--load-model")
    parser.add_argument("--classify", action='store_true')

    args = parser.parse_args()
    if args.model == "GBDT":
        model = ForestModel(args.learning_rate, args.depth, args.iterations, args.thread_count, args.window_size)
    elif args.model == "LSTM":
        model = NeuralModel(args.num_lstms, args.dropouts, args.units)
    model.prepare(args.convert_from, args.convert_to, args.split_factor)
    if args.load_model:
        model.load(args.load_model)
    else:
        model.train()
    if args.output_model_name:
        model.save([args.output_model_name + str(i) + '.h5' for i in range(args.num_lstms)])
    quality = model.test(not args.classify)
    for name, value in quality:
        print(name, ":", value)

    if args.classify:
        with open(args.convert_from, 'r') as f1, open(args.convert_to, 'r') as f2:
            new_queries = get_new_parses_for_first(f2, f1)
            logging.info("Total new words %s", len(new_queries))
Example 8
from __future__ import print_function
import cv2
import numpy as np
from neural_model import NeuralModel
from sudoku_tools import *

cap = cv2.VideoCapture(0)
cv2.startWindowThread()

# Load our NeuralModel singleton up front.
NeuralModel.instance()
required_num_in_sol = "123456789"
try:
    while True:
        # Skip frames the camera fails to deliver.
        ret, img = cap.read()
        if not ret:
            continue

        img_shape = img.shape
        output_shape = (img_shape[1], img_shape[0])

        # Check for valid sudoku-box corners.
        corners = get_sudoku_box(img, draw_contours=True)
        if corners is not None:

            cropped_sudoku, sudoku_crop_thresh, extracted_digits, predicted_unsolved_grid, img_cropped_sudoku, img_final, sudoku = sudoku_main(
                img, corners, required_num_in_sol=required_num_in_sol)

            # Create a collage of the thresholded input sudoku and the solved sudoku.
            div = np.zeros((250, 25), np.float64)
            div.fill(255)
            input_sudoku = extracted_digits
            input_sudoku = cv2.resize(input_sudoku, (250, 250))