Example #1
    def action(self):
        State.level = 0

        realGuy = Funcs.ship_assign(State.picked_ship,
                                    State.start_lives,
                                    player=True)

        Scripts.main_loop(realGuy)
Example #2
 def predict(self, x_data, batch_size=500):
     assert isinstance(
         x_data, numpy.ndarray), "input features must be a numpy array"
     assert len(x_data.shape) == 2, "it must be an array of feature vectors"
     logger.info('classifier prediction called')
     logger.debug('x_data shape: {0}'.format(x_data.shape))
     logger.debug('forming prediction function')
     x_data = Scripts.shared_dataset(data_x=x_data)
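     # givens maps the symbolic input self.x to the minibatch slice selected by self.index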
     givens = {
         self.x:
         x_data[self.index * batch_size:(self.index + 1) * batch_size]
     }
     pred_model = theano.function(inputs=[self.index],
                                  outputs=self.layers[-1].y_pred,
                                  givens=givens,
                                  on_unused_input='warn',
                                  allow_input_downcast=True)
     logger.debug('input shape: {0}'.format(
         x_data.get_value(borrow=True).shape))
     logger.info('beginning prediction on x_data')
     # floor division keeps n_batches an integer for the range() call below
     n_batches = x_data.get_value(borrow=True).shape[0] // batch_size
     result = []
     for batch_index in range(n_batches):
         logger.debug('processing batch {0}'.format(batch_index))
         batch_result = pred_model(batch_index)
         logger.debug('result generated')
         result = numpy.hstack((result, batch_result))
     logger.debug('output shape: {0}'.format(len(result)))
     # batch size, rows, columns, channels.
     return result
Example #3
    def __init__(self, app, window):

        # Application (front end) instance
        self.app = app

        # Root window instance from the main app
        self.window = window

        # Instantiate database object
        self.db = Database('Database1.db')

        # Connect to utilities
        self.utils = Utils()

        # Get access to scripts
        self.scripts = Scripts.Scripts(self.db)

        # The limit on the number of records in the Customer table
        # Warning: if you increase this limit, add more names to names.txt
        self.recordLimit = 1000

        # Current user username
        self.user = ''

        # Current user's permissions in the application
        self.permissions = {}

        # Email setting
        self.email = False
Example #4
 def score(self, test_data, test_labels, batch_size=500):
     logger.info('Generating Classification Score')
     logger.debug('creating shared datasets')
     test_data, test_labels = Scripts.shared_dataset(data_x=test_data, data_y=test_labels)
     logger.debug('producing batch information')
     n_test_batches = test_data.get_value(borrow=True).shape[0]
     n_test_batches //= batch_size  # floor division so the batch count stays an integer
     logger.debug('generating theano functions')
     test_givens = {self.x: test_data[self.index * batch_size: (self.index + 1) * batch_size], self.y: test_labels[self.index * batch_size: (self.index + 1) * batch_size]}
     test_model = theano.function(inputs=[self.index], outputs=self.layers[-1].errors(self.y), givens=test_givens, on_unused_input='warn')
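     # errors() is assumed to return the batch's mean misclassification rate,
     # so 1 - mean(losses) below is the classification accuracy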
     logger.debug('producing test results')
     losses = [test_model(i) for i in range(n_test_batches)]
     return 1.0-numpy.mean(losses)
Example #5
    def action(self):
        global realGuy
        # global t
        State.t = False

        for object in State.player_group:

            object.speed = [0, 0]
            object.kill()
        for object in State.movable:
            object.kill()
        for object in State.interface:
            object.kill()

        realGuy = Funcs.ship_assign(State.picked_ship,
                                    State.start_lives,
                                    player=True)

        State.save['level'] = 0

        Funcs.spawn_wave(realGuy)
        Scripts.main_loop(realGuy)
Example #6
    def __init__(self):
        super(MainWindow, self).__init__()

        Globals.MainWindow = self

        self.Toolbar = QtGui.QToolBar()
        self.Toolbar.setObjectName('MainToolBar')
        
        self.addToolBar(self.Toolbar)
        self.setUnifiedTitleAndToolBarOnMac(True)

        self.scripts2 = Scripts.Scripts2(self)
        self.setCentralWidget(self.scripts2)

        statusBar = QtGui.QStatusBar()
        self.setStatusBar(statusBar)
        self.displayStatusMessageSignal.connect(statusBar.showMessage)

        self.restoreStateAndGeometry()
Example #7
 def predict(self, x_data, batch_size=500):
     assert isinstance(x_data, numpy.ndarray), "input features must be a numpy array"
     assert len(x_data.shape) == 2, "it must be an array of feature vectors"
     logger.info('classifier prediction called')
     logger.debug('x_data shape: {0}'.format(x_data.shape))
     logger.debug('forming prediction function')
     x_data = Scripts.shared_dataset(data_x=x_data)
     givens = {self.x: x_data[self.index * batch_size: (self.index + 1) * batch_size]}
     pred_model = theano.function(inputs=[self.index], outputs=self.layers[-1].y_pred, givens=givens, on_unused_input='warn', allow_input_downcast=True)
     logger.debug('input shape: {0}'.format(x_data.get_value(borrow=True).shape))
     logger.info('beginning prediction on x_data')
     n_batches = x_data.get_value(borrow=True).shape[0] // batch_size
     result = []
     for batch_index in range(n_batches):
         logger.debug('processing batch {0}'.format(batch_index))
         batch_result = pred_model(batch_index)
         logger.debug('result generated')
         result = numpy.hstack((result, batch_result))
     logger.debug('output shape: {0}'.format(len(result)))
     # batch size, rows, columns, channels.
     return result
Example #8
 def score(self, test_data, test_labels, batch_size=500):
     logger.info('Generating Classification Score')
     logger.debug('creating shared datasets')
     test_data, test_labels = Scripts.shared_dataset(data_x=test_data,
                                                     data_y=test_labels)
     logger.debug('producing batch information')
     n_test_batches = test_data.get_value(borrow=True).shape[0]
     n_test_batches //= batch_size
     logger.debug('generating theano functions')
     test_givens = {
         self.x:
         test_data[self.index * batch_size:(self.index + 1) * batch_size],
         self.y:
         test_labels[self.index * batch_size:(self.index + 1) * batch_size]
     }
     test_model = theano.function(inputs=[self.index],
                                  outputs=self.layers[-1].errors(self.y),
                                  givens=test_givens,
                                  on_unused_input='warn')
     logger.debug('producing test results')
     losses = [test_model(i) for i in range(n_test_batches)]
     return 1.0 - numpy.mean(losses)
Example #9
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Will Brennan'


# Built-in Modules
# Standard Modules
import theano
# Custom Modules
import Scripts
import DeepConv
from sklearn.externals import joblib

if __name__ == '__main__':
    args = Scripts.get_args()
    logger = Scripts.get_logger(quiet=args.quiet, debug=args.debug)
    data = Scripts.get_mnist()
    data = Scripts.normalise(data)
    x, x_test, y, y_test = Scripts.sklearn2theano(data)
    classifier = DeepConv.DeepConv(args)
    classifier.fit(data=x, labels=y, test_data=x_test, test_labels=y_test, n_epochs=args.n_epochs, batch_size=args.batch_size)
    y_pred = classifier.predict(x_test)
    classifier.score_report(y_test=y_test, y_pred=y_pred)
    logger.info('Classifier Scoring: {0}'.format(classifier.score(x_test, y_test)))
    joblib.dump(classifier, 'digits.pkl')
Example #10
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import os
import sys
import subprocess

# Names of packages already installed, parsed from `pip freeze` output
installed_packages = [
    p.decode().split("==")[0] for p in subprocess.check_output(
        [sys.executable, "-m", "pip", "freeze"]).split()
]

packages = ["numpy", "Pillow", "PyPDF2"]

for package in packages:
    if package not in installed_packages:
        os.system(f"{sys.executable} -m pip install {package}")

if __name__ == "__main__":
    import Scripts
    Scripts.main()
Example #11
def upload():

    if request.method == 'POST':

        #Checks if username is correct
        user_name = request.form.get('input_user')
        if user_name != '':
            return render_template('invalid_username.html')

        #Checks if password is correct
        password = request.form.get('input_password')
        if password != '':
            return render_template('invalid_password.html')

        # Retrieve data submitted through the 'Contribute' form
        submission = request.form.get('new_submission_name')

        now = datetime.datetime.now().date()

        notebook = request.form.get("notebook_link")

        int_type = request.form.get('input_interaction')

        organism = request.form.get('input_species')

        resource = request.form.get('input_resource')

        file_type = request.form.get('input_type')

        file = request.files['input_file']

        ###### Processing 'file' and uploading data to server ###############

        #wrap FileStorage object in a TextIOWrapper
        #So file can later be used
        f = io.TextIOWrapper(file)

        # Submit data on submissions before processing the data
        #Must be done prior to processing the data so that the
        #submissions foreign key can be integrated into the
        # interactions table inserted when the file is processed
        submissions = pd.DataFrame(
            columns=['submission_name', 'submission_type_fk', 'resource_fk'])
        submissions = submissions.append([{
            'submission_name': submission,
            'submission_type_fk': int(int_type),
            'resource_fk': int(resource),
            'date_contributed': str(now),
            'processing_script': str(notebook),
            'file_type_fk': int(file_type)
        }])

        #Insert submissions into the table
        insert_s = "INSERT INTO submissions (submission_name, submission_type_fk, resource_fk, date_contributed, processing_script, file_type_fk) VALUES" + ', '.join(
            [
                '("{submission_name}", {submission_type_fk}, {resource_fk}, "{date_contributed}", "{processing_script}", {file_type_fk})'
                .format(**rowData)
                for index, rowData in submissions.iterrows()
            ])
        sub = engine.execute(insert_s)

        #Retrieve id of last insert into a database table (i.e. submission foreign key)
        submission_fk = sub.lastrowid
        print(submission_fk)

        #Process a gmt file if file type = 1
        if file_type == '1':
            gmt = f
            gmt_data = [x.strip().split('\t') for x in gmt.readlines()]
            sig_version = Scripts.GMT_to_SIG(gmt_data,
                                             secure_filename(file.filename))
            interactions = Scripts.SIG_to_Genes(sig_version, submission_fk,
                                                organism, engine)
            insert_gene = "INSERT INTO interactions (source_gene_fk, target_gene_fk, submission_fk) VALUES" + ', '.join(
                [
                    '({source_gene_fk}, {target_gene_fk}, {submission_fk})'.
                    format(**rowData)
                    for index, rowData in interactions.iterrows()
                ])
            engine.execute(insert_gene)

            ##### Calculate Website Statistics for 'Submissions' Page for GMT File ################

            stat_df = sig_version.drop([1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12],
                                       axis=1)
            stat_df.columns = ['ProteinA', 'ProteinB']

            # First stat: Number of Total Interactions
            interaction_num = len(stat_df)

            # Second stat: Number of Sources/Hubs
            hub_terms = len(stat_df.ProteinA.unique())
            print(hub_terms)

            # Third stat: Number of Targets
            target_terms = len(stat_df.ProteinB.unique())
            print(target_terms)

            #Fourth stat: Total Number of Unique Terms
            if stat_df['ProteinA'].str.contains('_').all():
                stat_df['ProteinA'] = [
                    x.split('_')[:-1] for x in stat_df.ProteinA
                ]
                stat_df['ProteinA'] = [
                    '_'.join(x) for x in stat_df['ProteinA']
                ]

            if stat_df['ProteinA'].str.contains('-').all():
                stat_df['ProteinA'] = [
                    x.split('-')[:-1] for x in stat_df.ProteinA
                ]
                stat_df['ProteinA'] = [
                    '-'.join(x) for x in stat_df['ProteinA']
                ]

            else:
                pass

            if stat_df['ProteinB'].str.contains(',').all():
                stat_df['ProteinB'] = [
                    x.split(',')[:-1] for x in stat_df.ProteinB
                ]
                stat_df['ProteinB'] = [
                    ','.join(x) for x in stat_df['ProteinB']
                ]

            else:
                pass

            unique_terms = len(
                pd.concat([stat_df.ProteinA, stat_df.ProteinB],
                          axis=0).unique())
            print(unique_terms)

            #Fifth stat: Avg. Interactions per Term
            stat_df.set_index('ProteinA', inplace=True)
            stat_df = stat_df.groupby('ProteinA').agg(lambda x: tuple(x))
            stat_df['targets'] = [
                int(len(lst))
                for source, lst in stat_df['ProteinB'].iteritems()
            ]

            avg_term = stat_df.targets.mean(axis=0)
            print(round(avg_term))

            insert_stat = "INSERT INTO statistics (interaction_num, hub_terms, target_terms, unique_terms, avg_term, submission_fk) VALUES" + '(%d' % interaction_num + ', %d' % hub_terms + ', %d,' % target_terms + '%d,' % unique_terms + '%d,' % avg_term + ' %d)' % submission_fk
            engine.execute(insert_stat)

        #Process a sig file if the file type = 2
        if file_type == '2':
            sig = pd.read_table(f, header=None)
            if len(sig.columns) == 1:
                sig = pd.read_table(
                    f,
                    header=None,
                    names=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
                    sep=' ')
            interactions = Scripts.SIG_to_Genes(sig, submission_fk, organism,
                                                engine)
            insert_gene = "INSERT INTO interactions (source_gene_fk, target_gene_fk, submission_fk) VALUES" + ', '.join(
                [
                    '({source_gene_fk}, {target_gene_fk}, {submission_fk})'.
                    format(**rowData)
                    for index, rowData in interactions.iterrows()
                ])
            engine.execute(insert_gene)

            ##### Calculate Website Statistics for 'Submissions' Page for SIG File ################

            stat_df = sig.drop([1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12], axis=1)
            stat_df.columns = ['ProteinA', 'ProteinB']

            # First stat: Number of Total Interactions
            interaction_num = len(stat_df)

            # Second stat: Number of Sources/Hubs
            hub_terms = len(stat_df.ProteinA.unique())
            print(hub_terms)

            # Third stat: Number of Targets
            target_terms = len(stat_df.ProteinB.unique())
            print(target_terms)

            #Fourth stat: Total Number of Unique Terms
            if stat_df['ProteinA'].str.contains('_').all():
                stat_df['ProteinA'] = [
                    x.split('_')[:-1] for x in stat_df.ProteinA
                ]
                stat_df['ProteinA'] = [
                    '_'.join(x) for x in stat_df['ProteinA']
                ]

            if stat_df['ProteinA'].str.contains('-').all():
                stat_df['ProteinA'] = [
                    x.split('-')[:-1] for x in stat_df.ProteinA
                ]
                stat_df['ProteinA'] = [
                    '-'.join(x) for x in stat_df['ProteinA']
                ]

            else:
                pass

            if stat_df['ProteinB'].str.contains(',').all():
                stat_df['ProteinB'] = [
                    x.split(',')[:-1] for x in stat_df.ProteinB
                ]
                stat_df['ProteinB'] = [
                    ','.join(x) for x in stat_df['ProteinB']
                ]

            else:
                pass

            unique_terms = len(
                pd.concat([stat_df.ProteinA, stat_df.ProteinB],
                          axis=0).unique())
            print(unique_terms)

            #Fifth stat: Avg. Interactions per Term
            stat_df.set_index('ProteinA', inplace=True)
            stat_df = stat_df.groupby('ProteinA').agg(lambda x: tuple(x))
            stat_df['targets'] = [
                int(len(lst))
                for source, lst in stat_df['ProteinB'].iteritems()
            ]

            avg_term = stat_df.targets.mean(axis=0)
            print(round(avg_term))

            insert_stat = "INSERT INTO statistics (interaction_num, hub_terms, target_terms, unique_terms, avg_term, submission_fk) VALUES" + '(%d' % interaction_num + ', %d' % hub_terms + ', %d,' % target_terms + '%d,' % unique_terms + '%d,' % avg_term + ' %d)' % submission_fk
            engine.execute(insert_stat)

    #Return to 'Contribute' page after making the submission
    return redirect(url_for('Contribute'))
Example #12
def main():
    #Scripts.saveABPlot(1, 10, 20, 80)
    Scripts.saveHuePlot(1, 50, 20, 70)
Example #13
 def test_sensor_data(self):
     sensor = sens.Sensor()
     self.assertGreater(
         sensor.get_temp(), 0,
         "get_temp should return a temperature above 0; this fails if no sensor is plugged in or the test is not run on an RPi"
     )
Example #14
 def fit(self,
         data,
         labels,
         test_data,
         test_labels,
         learning_rate=0.1,
         n_epochs=250,
         nkerns=[20, 50],
         batch_size=500):
     logger.info('Initialising the classifier')
     rng = numpy.random.RandomState()
     data, labels = Scripts.shared_dataset(data_x=data, data_y=labels)
     test_data, test_labels = Scripts.shared_dataset(data_x=test_data,
                                                     data_y=test_labels)
     if batch_size < 1:
         batch_size = data.get_value(borrow=True).shape[0]
     n_train_batches = data.get_value(borrow=True).shape[0] / batch_size
     n_test_batches = test_data.get_value(borrow=True).shape[0] / batch_size
     logger.info('Constructing the classifier')
     self.layers = []
     self.layers.append(
         Layers.PoolingLayer(rng,
                             input=self.x.reshape((batch_size, 1, 28, 28)),
                             image_shape=(batch_size, 1, 28, 28),
                             filter_shape=(nkerns[0], 1, 5, 5),
                             poolsize=(2, 2)))
     self.layers.append(
         Layers.PoolingLayer(rng,
                             input=self.layers[-1].output,
                             image_shape=(batch_size, nkerns[0], 12, 12),
                             filter_shape=(nkerns[1], nkerns[0], 5, 5),
                             poolsize=(2, 2)))
     self.layers.append(
         Layers.HiddenLayer(rng,
                            input=self.layers[-1].output.flatten(2),
                            n_in=nkerns[1] * 4 * 4,
                            n_out=500,
                            activation=T.tanh))
     self.layers.append(
         Layers.LogisticRegression(input=self.layers[-1].output,
                                   n_in=500,
                                   n_out=10))
     test_givens = {
         self.x:
         test_data[self.index * batch_size:(self.index + 1) * batch_size],
         self.y:
         test_labels[self.index * batch_size:(self.index + 1) * batch_size]
     }
     self.test_model = theano.function([self.index],
                                       self.layers[-1].errors(self.y),
                                       givens=test_givens)
     params = self.layers[0].params + self.layers[1].params + self.layers[
         2].params + self.layers[3].params
     cost = self.layers[-1].negative_log_likelihood(self.y)
     grads = T.grad(cost, params)
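     # plain SGD: shift each parameter against its gradient, scaled by learning_rate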
     updates = [(param_i, param_i - learning_rate * grad_i)
                for param_i, grad_i in zip(params, grads)]
     train_givens = {
         self.x:
         data[self.index * batch_size:(self.index + 1) * batch_size],
         self.y:
         labels[self.index * batch_size:(self.index + 1) * batch_size]
     }
     self.train_model = theano.function([self.index],
                                        cost,
                                        updates=updates,
                                        givens=train_givens)
     patience, patience_increase = 10000, 2
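     # patience caps the total number of minibatch updates; the loop below stops once count reaches it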
     validation_frequency = min(n_train_batches, patience / 2)
     epoch, count = 0, 0
     start_time = time.time()
     n_iters = n_epochs * n_train_batches
     logger.info("Fitting Classifier")
     logger.debug("{0} epochs, {1} batches, {2} iterations".format(
         n_epochs, n_train_batches, n_iters))
     while epoch < n_epochs and patience > count:
         epoch += 1
         for minibatch_index in xrange(n_train_batches):
             count = (epoch - 1) * n_train_batches + minibatch_index
             if count % 50 == 0:
                 percentage = round(100.0 * count / n_iters, 2)
                 if percentage == 0:
                     time_stamp = "Null"
                 else:
                     time_stamp = datetime.utcfromtimestamp(
                         (time.time() - start_time) * (100.0 / percentage) +
                         start_time)
                 logger.info(
                     "training is {0}% complete (Completion at {1})".format(
                         round(percentage, 2), time_stamp))
             train_cost = self.train_model(minibatch_index)
             if (count + 1) % validation_frequency == 0:
                 testlosses = [
                     self.test_model(i) for i in xrange(n_test_batches)
                 ]
                 test_score = numpy.mean(testlosses)
                 logger.info(
                     'Test error of {0}% achieved on Epoch {1} Iteration {2}'
                     .format(test_score * 100.0, epoch, count + 1))
             logger.debug("Iteration number {0}".format(count))
     logger.debug('Optimization complete.')
     logger.debug('Conducting final model testing')
     testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
     test_score = numpy.mean(testlosses)
     t_taken = int((time.time() - start_time) / 60.0)
     logger.info('Training Complete')
     logger.info('Test score of {0}%, training time {1}m'.format(
         test_score * 100.0, t_taken))
     if self.args_save:
         self.save()
Example #15
def upload():

	if request.method == 'POST':


		#this works!
		#Check if username is correct!
		user_name = request.form.get('input_user')
		if user_name != '':
			return "Invalid username, please try again" 

		#this works!
		#Check if password is correct!
		password = request.form.get('input_password')
		if password != '':
			return "Invalid password, please try again" 

		#this works!
		submission = request.form.get('new_submission_name')

		#this works!
		date = request.form.get('date')
		print(date)

		#this works!
		notebook = request.form.get("notebook_link")
		print(notebook)
		
		#this works!
		int_type = request.form.get('input_interaction')

		#this works!
		organism = request.form.get('input_species')
		

		#this works!
		resource = request.form.get('input_resource')

		#this works!
		file_type = request.form.get('input_type')


		#this works!
		file = request.files['input_file']
		print(file)


		###### Processing 'file' and uploading data to server ###############


		#this works! (for now)
		f = io.TextIOWrapper(file)
		print(f)

		#this works!
		submissions = pd.DataFrame(columns = ['submission_name', 'submission_type_fk', 'resource_fk'])
		submissions = submissions.append([{
			'submission_name': submission,
			'submission_type_fk': int(int_type),
			'resource_fk': int(resource)
			}])
		print(submissions)

		#When uncommented, this works! UNCOMMENT FOR FINAL TRIAL
		insert_s = "INSERT INTO submissions (submission_name, submission_type_fk, resource_fk) VALUES"+ ', '.join(['("{submission_name}", {submission_type_fk}, {resource_fk})'.format(**rowData) for index, rowData in submissions.iterrows()])
		sub = engine.execute(insert_s)

		submission_fk = sub.lastrowid
		print(submission_fk)

		#add other 'if' statements to send file to right python processor based on int_type

		#also at some point once this whole process is over we also want to save these files 
		#somewhere and convert them to their alternate format --> I know procedure to save locally 
		#but is this what we really want? --> Won't be automatic uploads elsewhere

		#this works!
		if file_type == '1':
			gmt = f
			gmt_data = [x.strip().split('\t') for x in gmt.readlines()]
			sig_version = Scripts.GMT_to_SIG(gmt_data, secure_filename(file.filename))
			print(sig_version.head())
			interactions = Scripts.SIG_to_Genes(sig_version, submission_fk, organism, engine)
			insert_gene = "INSERT INTO interactions (source_gene_fk, target_gene_fk, submission_fk) VALUES"+ ', '.join(['({source_gene_fk}, {target_gene_fk}, {submission_fk})'.format(**rowData) for index, rowData in interactions.iterrows()])
			engine.execute(insert_gene)
			

		#this works!
		if file_type == '2':
			sig = pd.read_table(f, header = None)
			if 5 not in sig.columns:
				sig = pd.read_table(f, header = None, 
				names = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], sep = ' ')
			print(sig.head())
			interactions = Scripts.SIG_to_Genes(sig, submission_fk, organism, engine)
			insert_gene = "INSERT INTO interactions (source_gene_fk, target_gene_fk, submission_fk) VALUES"+ ', '.join(['({source_gene_fk}, {target_gene_fk}, {submission_fk})'.format(**rowData) for index, rowData in interactions.iterrows()])
			engine.execute(insert_gene)

	return redirect(url_for('Contribute'))
Example #16
 def fit(self, data, labels, test_data, test_labels, learning_rate=0.1, n_epochs=250, nkerns=[20, 50], batch_size=500):
     logger.info('Initialising the classifier')
     rng = numpy.random.RandomState()
     data, labels = Scripts.shared_dataset(data_x=data, data_y=labels)
     test_data, test_labels = Scripts.shared_dataset(data_x=test_data, data_y=test_labels)
     if batch_size < 1:
         batch_size = data.get_value(borrow=True).shape[0]
     n_train_batches = data.get_value(borrow=True).shape[0]/batch_size
     n_test_batches = test_data.get_value(borrow=True).shape[0]/batch_size
     logger.info('Constructing the classifier')
     self.layers = []
     self.layers.append(Layers.PoolingLayer(
         rng,
         input=self.x.reshape((batch_size, 1, 28, 28)),
         image_shape=(batch_size, 1, 28, 28),
         filter_shape=(nkerns[0], 1, 5, 5),
         poolsize=(2, 2)
     ))
     self.layers.append(Layers.PoolingLayer(
         rng,
         input=self.layers[-1].output,
         image_shape=(batch_size, nkerns[0], 12, 12),
         filter_shape=(nkerns[1], nkerns[0], 5, 5),
         poolsize=(2, 2)
     ))
     self.layers.append(Layers.HiddenLayer(
         rng,
         input=self.layers[-1].output.flatten(2),
         n_in=nkerns[1] * 4 * 4,
         n_out=500,
         activation=T.tanh
     ))
     self.layers.append(Layers.LogisticRegression(
         input=self.layers[-1].output,
         n_in=500,
         n_out=10
     ))
     test_givens = {self.x: test_data[self.index * batch_size: (self.index + 1) * batch_size], self.y: test_labels[self.index * batch_size: (self.index + 1) * batch_size]}
     self.test_model = theano.function([self.index], self.layers[-1].errors(self.y), givens=test_givens)
     params = self.layers[0].params + self.layers[1].params + self.layers[2].params + self.layers[3].params
     cost = self.layers[-1].negative_log_likelihood(self.y)
     grads = T.grad(cost, params)
     updates = [(param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads)]
     train_givens = {self.x: data[self.index * batch_size: (self.index + 1) * batch_size], self.y: labels[self.index * batch_size: (self.index + 1) * batch_size]}
     self.train_model = theano.function([self.index], cost, updates=updates, givens=train_givens)
     patience, patience_increase = 10000, 2
     validation_frequency = min(n_train_batches, patience / 2)
     epoch, count = 0, 0
     start_time = time.time()
     n_iters = n_epochs*n_train_batches
     logger.info("Fitting Classifier")
     logger.debug("{0} epochs, {1} batches, {2} iterations".format(n_epochs, n_train_batches, n_iters))
     while epoch < n_epochs and patience > count:
         epoch += 1
         for minibatch_index in xrange(n_train_batches):
             count = (epoch - 1) * n_train_batches + minibatch_index
             if count % 50 == 0:
                 percentage = round(100.0*count/n_iters, 2)
                 if percentage == 0:
                     time_stamp = "Null"
                 else:
                     time_stamp = datetime.utcfromtimestamp((time.time()-start_time)*(100.0/percentage)+start_time)
                 logger.info("training is {0}% complete (Completion at {1})".format(round(percentage, 2), time_stamp))
             train_cost = self.train_model(minibatch_index)
             if (count + 1) % validation_frequency == 0:
                 testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
                 test_score = numpy.mean(testlosses)
                 logger.info('Test error of {0}% achieved on Epoch {1} Iteration {2}'.format(test_score*100.0, epoch, count+1))
             logger.debug("Iteration number {0}".format(count))
     logger.debug('Optimization complete.')
     logger.debug('Conducting final model testing')
     testlosses = [self.test_model(i) for i in xrange(n_test_batches)]
     test_score = numpy.mean(testlosses)
     t_taken = int((time.time()-start_time)/60.0)
     logger.info('Training Complete')
     logger.info('Test score of {0}%, training time {1}m'.format(test_score*100.0, t_taken))
     if self.args.save:
         self.save()
Example #17
     entable = table1
     enarm = arm1
     face = face2
     enface = face1
 else:
     table = table1
     arm = arm1
     deck = deck1
     entable = table2
     enarm = arm2
     face = face1
     enface = face2
 if deck: arm.append(deck.pop(randint(0, len(deck) - 1)))
 Description.screen(arm, table, entable, enarm, face, enface)
 inp = Scripts.select(
     "'pass' to pass the turn, 'cast' to cast the card or 'attack' to attack with a cast card\n",
     -1, ["pass", "cast", "attack"])
 if inp == "pass":
     turn = not turn
 elif inp == "cast":
     inp = Scripts.select("select number of card to cast or 'cancel'\n",
                          len(arm), [
                              "cancel",
                          ])
     if inp == "cancel":
         pass
     else:
         table.append(arm.pop(inp))
 elif inp == "attack":
     inp = Scripts.select(
         "select number of card to attack with or 'cancel'\n", len(table), [
Example #18
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Will Brennan'

# Built-in Modules
# Standard Modules
import theano
# Custom Modules
import Scripts
import DeepConv

if __name__ == '__main__':
    args = Scripts.get_args()
    logger = Scripts.get_logger(quiet=args.quiet, debug=args.debug)
    data = Scripts.get_mnist()
    data = Scripts.normalise(data)
    x, x_test, y, y_test = Scripts.sklearn2theano(data)
    classifier = DeepConv.DeepConv(save=args.save,
                                   load=args.load,
                                   debug=args.debug)
    classifier.fit(data=x,
                   labels=y,
                   test_data=x_test,
                   test_labels=y_test,
                   n_epochs=args.n_epochs,
                   batch_size=args.batch_size)
    y_pred = classifier.predict(x_test)
    classifier.score_report(y_test=y_test, y_pred=y_pred)
    logger.info('Classifier Scoring: {0}'.format(
        classifier.score(x_test, y_test)))
    Scripts.confusion_matrix(y_test, y_pred)
Example #19
    if isinstance(args_string, str):
        args_string = [arg for arg in args_string.split(' ') if arg != ""]
        args = parser.parse_args(args_string)
    else:
        args = parser.parse_args()
    return args


def frame_diff(img_vold, img_old, new_img):
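    # OR of two consecutive absolute differences: a pixel counts as motion
    # if it changed in either of the last two frame intervals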
    img_diff0 = cv2.absdiff(new_img, img_old)
    img_diff1 = cv2.absdiff(img_old, img_vold)
    return cv2.bitwise_or(img_diff0, img_diff1)

if __name__ == '__main__':
    args = get_args()
    logger = Scripts.get_logger(quiet=args.quiet, debug=args.debug)
    args.video_path = os.path.abspath(args.video_path)
    logger.debug('searching for frame at {0}'.format(args.video_path))
    assert os.path.exists(args.video_path), '{0} is not a valid path'.format(args.video_path)
    logger.info('loading video from {0}'.format(args.video_path))
    cam = cv2.VideoCapture(args.video_path)
    data = numpy.array([], dtype=numpy.float)
    frames = []
    frame_id = 0
    triggered = False
    while True:
        ret, frame = cam.read()
        if ret:
            frame_id += 1
            logger.debug('processing {0} frame'.format(frame_id))
            len_frame = len(frames)