Exemple #1
0
def evaluate(sample_file, **kwargs):
    """Evaluate stored classification models against the given samples.

    Expected kwargs:
        D: truthy to enable duration features.
        w: evaluation window size (coerced to int).
        M: truthy to run the multiclass evaluation.
        B: truthy to also run the binarized evaluation.
    """
    # Idiom fix: `True if x else False` is just `bool(x)`.
    use_duration = bool(kwargs['D'])
    window = int(kwargs['w'])
    pv('window', 'use_duration', stdout=True)

    samples = None
    models = None

    # Only build samples/models when at least one evaluation mode is requested.
    if kwargs['M'] or kwargs['B']:
        samples = {classification.normalize_name(t): [t] for t in sample_file}
        models = extract_models(ZIPPED_MODELS)

    if samples and models:
        if kwargs['M']:
            classification.evaluate(samples, models,
                                    use_duration=use_duration,
                                    window=window)

        if kwargs['B']:
            binary_samples = classification.binarize(samples)
            binary_models = classification.binarize(models)

            # binarize may yield empty results; skip evaluation in that case.
            if binary_samples and binary_models:
                classification.evaluate(binary_samples, binary_models,
                                        use_duration=use_duration,
                                        window=window)
Exemple #2
0
    def _single_run(self):
        """Run one experiment iteration.

        Builds a fresh stimuli set, splits it into training/test, trains the
        model on the training split, and evaluates on the test split before
        and after training.

        Returns:
            pd.Series with the mean frequency of both original stimuli, the
            distance between them, and the pre/post/diff accuracies.
        """
        # Reset model weights
        self.model.reset()
        # Generate set for this run
        stimuli_set = make_set_from_specs(
            origin_transform_function=self.origin_transform_function,
            origin_transform_params=self.origin_transform_params,
            set_transform_function=self.set_transform_function,
            set_transform_params=self.set_transform_params,
            **self.stimuli_creation_params)
        # Split into training and test
        test_set, training_set = stimuli_set.split(self.training_params['fraction_training'])

        # Add current StimuliSet to list of StimuliSets in the Experiment object
        self.stimuli_sets['training'].append(training_set)
        self.stimuli_sets['test'].append(test_set)
        # Handle conversion to tempotron format once.
        # BUG FIX: the original accessed the method without calling it
        # (missing parentheses), so the test set was never actually converted.
        test_set._make_tempotron_converted()
        training_set._make_tempotron_converted()
        # Initial evaluation over test set
        pre = evaluate(stimuli_set=test_set,
                       tempotron=self.model)
        # Train model with training set
        batch_train(stimuli_set=training_set, tempotron=self.model, **self.training_params)
        # Evaluate post training performance
        post = evaluate(stimuli_set=test_set, tempotron=self.model)
        # Calculate difference
        diff = post - pre
        # Distance is the same for test and training: both splits derive
        # from the same two original stimuli.
        orig_stimuli_distance = test_set.original_stimuli_distance
        # Mean firing frequency of each original stimulus: spike counts per
        # neuron scaled by 1000 / duration, i.e. spikes-per-ms -> Hz
        # (assumes stimulus_duration is in milliseconds — TODO confirm).
        origs_a_mean_freq = np.mean(
            np.multiply([neuron.size for neuron in stimuli_set.original_stimuli[0]],
                        1000 / self.stimuli_creation_params['stimulus_duration'])
        )
        origs_b_mean_freq = np.mean(
            np.multiply([neuron.size for neuron in stimuli_set.original_stimuli[1]],
                        1000 / self.stimuli_creation_params['stimulus_duration'])
        )
        single_results = pd.Series([origs_a_mean_freq, origs_b_mean_freq, orig_stimuli_distance, pre, post, diff],
                                   ['orig_a_mean_freq', 'orig_b_mean_freq', 'orig_distance', 'pre', 'post', 'diff'])
        return single_results
Exemple #3
0
def evaluate():
    """
    Called when evaluation should be run.

    Validates the UI state (classifier, parameterizer, input text), then
    classifies the entered text and shows the resulting topic in the label.
    :return: None
    """
    # Guard clauses: report the missing piece of UI state and bail out.
    if classifier is None:
        print("Make sure to select a classifier")
        return
    if parameterizer is None:
        print("Make sure to select a parameterizer")
        return

    text = text_input.get("1.0", END)
    # An empty Tk text widget yields a lone newline.
    if text == "\n":
        print("Make sure you input a text for classification")
        return

    articles = preprocessing.get_train_set()
    parameterization.setup_parameterizator(parameterizer, articles)
    classification.setup_classifier(classifier)
    # FIX: the original re-fetched the widget text here even though `text`
    # was already read above; the duplicate fetch is removed.
    topic = classification.evaluate(text, articles)
    topic_label.configure(text="Topic: {0}".format(topic))
Exemple #4
0
import classification
from classification import evaluate

# Classify the year for the sample image file.
year = evaluate('M/1983_Massachusetts_Plymouth_Plymouth-Carver_37-4.png')
# BUG FIX: `print year` is Python 2 statement syntax and is a SyntaxError
# on Python 3; use the print() function.
print(year)
Exemple #5
0
                        required = True,
                        type = float, 
                        help = "Learning rate")

    # NOTE(review): this chunk starts mid-way through a parser.add_argument
    # call (presumably --learning_rate) — the argparse setup begins above
    # this visible fragment.
    parser.add_argument("--gradient_accumulation_steps",
                        default = 1,
                        type = int,
                        help = "Batch size = batch_size * gradient_accumulation_steps")

    parser.add_argument("--seed", default = 42, type = int, help = "Random seed")

    parser.add_argument("--batch_size", default = 5, type = int, help = "Batch size for training")

    parser.add_argument("--seq_len", default = 256, type = int, help = "Maximum Sequnece Length for input")
    
    args = parser.parse_args()

    # Seed RNGs for reproducibility (set_seed defined elsewhere in the file).
    set_seed(args)
    
    # Resolve concrete objects from registries keyed by CLI choices
    # (models/tokenizers/optimizers are presumably dicts defined above —
    # TODO confirm against the full file).
    model = models[args.model]
    tokenizer = tokenizers[args.tokenizer]
    optimizer = optimizers[args.optimizer](
                model.parameters(), lr = args.learning_rate)

    train_loader, valid_loader, test_loader = dataloader(tokenizer, args)
    
    # Train for the requested number of epochs, validating after each one.
    for i in range(args.epochs):
        train(model, optimizer, train_loader, args)
        evaluate(model, valid_loader)

    # Final evaluation on the held-out test split.
    evaluate(model, test_loader)
Exemple #6
0
 def predict(self, image_path):
 	year = evaluate(image_path)
   	return year
Exemple #7
0
import random
from nltk.corpus import movie_reviews
from review_sentiment import ReviewSentiment
import classification

if __name__ == '__main__':
    # Pair each movie review's raw text with its first category label.
    reviews = []
    for fid in movie_reviews.fileids():
        reviews.append((movie_reviews.raw(fileids=fid),
                        movie_reviews.categories(fid)[0]))
    # Deterministic shuffle, then keep a small subsample for speed.
    random.seed(1234)
    random.shuffle(reviews)
    reviews = reviews[:100]
    rs = ReviewSentiment(reviews, train_size=50)
    # Train all classifiers and report their evaluation metrics.
    classifiers = classification.train(rs)
    classification.evaluate(rs, classifiers)
    # Use the top-ranked classifier for the directory predictions below.
    best = classifiers[0][0]
    print()
    print("positive reviews prediction")
    classification.predict(rs, "data/positive/", best, 0)
    print()
    print("negative reviews prediction")
    classification.predict(rs, "data/negative/", best, 0)
Exemple #8
0
import preprocessing
import parameterization
import classification

"""
This script evaluates the accuracy of all parameterization and classification combinations
"""
if __name__ == '__main__':
    parameterizers = ["bow", "tf", "tf_idf"]
    classifiers = ["euclid", "bayes", "rocchio"]

    train_articles = preprocessing.get_train_set()
    test_articles = preprocessing.get_test_set()

    # Exercise every (parameterizer, classifier) pairing and report accuracy.
    for param_name in parameterizers:
        parameterization.setup_parameterizator(param_name, train_articles)
        for clf_name in classifiers:
            classification.setup_classifier(clf_name)

            correct = 0
            total = 0
            # Starts at index 5, skipping the first five test articles
            # (same range as the original).
            for idx in range(5, len(test_articles)):
                print("Evaluate article {0}".format(idx))
                article = test_articles[idx]
                topic = classification.evaluate(article['body'], train_articles, True)
                print("Evaluated topic: '{0}'  Possible topics: '{1}'".format(topic, article['topics']))
                correct += 1 if topic in article['topics'] else 0
                total += 1
            print("Combination of '{0}' and '{1}' results in {2} correct of {3}.".format(param_name, clf_name, correct, total))
Exemple #9
0
training_params = dict(
    learning_rate=1e-3,
    batch_size=50,
    training_repetitions=15,
    fraction=0.21,
)
# %%
# Time train/evaluate on the stimuli set as created (no pre-conversion).
stimuli_set = make_set_from_specs(**creation_params, set_size=set_size,
                                   set_transform_function=set_transform_function,
                                   set_transform_params=set_transform_params)

T = Tempotron(**tempotron_params)

t1 = time()
pre = evaluate(stimuli_set, tempotron=T)
batch_train(stimuli_set, tempotron=T, **training_params)
post = evaluate(stimuli_set, tempotron=T)
d1 = time() - t1
# %%
# Pre-convert the set to tempotron format, then time the same pipeline
# again with a fresh model.
stimuli_set._make_tempotron_converted()

T = Tempotron(**tempotron_params)

t2 = time()
pre = evaluate(stimuli_set, tempotron=T)
batch_train(stimuli_set, tempotron=T, **training_params)
post = evaluate(stimuli_set, tempotron=T)
d2 = time() - t2

# BUG FIX: d2 was computed but never printed, so the timing comparison
# between the two runs was lost — report both durations.
print(d1)
print(d2)
Exemple #10
0
set_size = 200

# Base parameters for stimulus generation.
creation_params = {
    'frequency': 15,
    'number_of_neurons': 30,
    'stimulus_duration': 500,
}

# Whole-set transformation: stochastic vesicle release.
set_transform_function = transform.stochastic_release
set_transform_params = {
    'release_duration': 5,
    'number_of_vesicles': 20,
    'stimulus_duration': creation_params['stimulus_duration'],
    'release_probability': 1,
    'num_transformed': 50,
}

# Origin transformation: symmetric interval shift.
origin_transform_function = transform.symmetric_interval_shift
origin_transform_params = {
    'stimulus_duration': creation_params['stimulus_duration'],
    'interval': (3, 7),
}
# %%
# Build the set, shuffle it, and run a single evaluation.
stimuli_set = make_set_from_specs(**creation_params, set_size=set_size,
                                  set_transform_function=set_transform_function,
                                  set_transform_params=set_transform_params)
stimuli_set.shuffle()

evaluate(stimuli_set=stimuli_set, tempotron_tau=2, tempotron_threshold=0.05)
# %%