Example #1
def sync_datasets(EMG, IMG, frame_len=0.2, frame_step=0.1):
    X_EMG, SUB_EMG, SES_EMG, TRI_EMG, Y_EMG = EMG
    X_IMG, SUB_IMG, SES_IMG, TRI_IMG, Y_IMG = IMG
    F_SUB = []
    F_SESS = []
    F_Y = []
    F_IMG = []
    F_EMG = []

    # Build the synchronized dataset over every subject, session, gesture and trial
    for subject in range(1, 22):
        for session in range(1, 4):
            for gesture in range(5):
                for trial in range(5):
                    # Per-recording sampling rate from the (external) corrections table
                    fs = corrections['subject{:02}_session0{}'.format(
                        subject, session)]['fs']

                    idx_emg = np.logical_and.reduce([
                        SUB_EMG == subject, SES_EMG == session,
                        TRI_EMG == trial, Y_EMG == gesture
                    ])
                    idx_img = np.logical_and.reduce([
                        SUB_IMG == subject, SES_IMG == session,
                        TRI_IMG == trial, Y_IMG == gesture
                    ])

                    e = X_EMG[idx_emg][0]
                    f = X_IMG[idx_img]

                    mav = analyze(e,
                                  fs=fs,
                                  frame_len=frame_len,
                                  frame_step=frame_step,
                                  feat='MSV',
                                  preprocess=False)
                    rms = analyze(e,
                                  fs=fs,
                                  frame_len=frame_len,
                                  frame_step=frame_step,
                                  feat='RMS',
                                  preprocess=False)
                    a = np.concatenate([mav, rms], 1)

                    # Pair each EMG feature frame with its proportional video frame
                    mapping = np.arange(len(a)) * len(f) // len(a)
                    F_EMG.append(a)
                    F_IMG.append(np.stack(f[mapping]))
                    F_SUB.append(np.ones((len(mapping))) * subject)
                    F_SESS.append(np.ones((len(mapping))) * session)
                    F_Y.append(np.ones((len(mapping))) * gesture)

    F_EMG = np.vstack(F_EMG)
    F_IMG = np.vstack(F_IMG)
    F_SUB = np.hstack(F_SUB)
    F_SESS = np.hstack(F_SESS)
    F_Y = np.hstack(F_Y)

    return F_EMG, F_IMG, F_SUB, F_SESS, F_Y
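
The integer mapping above is what synchronizes the two streams: each EMG feature frame is paired with a proportionally spaced video frame. A minimal standalone sketch of that alignment, with hypothetical frame counts:

import numpy as np

# Hypothetical sizes: 7 EMG feature frames against 30 video frames for one trial.
n_emg, n_img = 7, 30
mapping = np.arange(n_emg) * n_img // n_emg
print(mapping)  # [ 0  4  8 12 17 21 25] -> one video frame index per EMG frame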
Example #2
 def test_match(self):
     line_cost = 2
     values = [
         {'date': '2010-12-31', 'val': 10},
         {'date': '2011-12-31', 'val': 10},
         {'date': '2012-12-31', 'val': 10},
         {'date': '2013-12-31', 'val': 5},
         {'date': '2014-12-31', 'val': 5},
         {'date': '2015-12-31', 'val': 5},
         {'date': '2016-12-31', 'val': 7},
         {'date': '2017-12-31', 'val': 9},
         {'date': '2018-12-31', 'val': 10},
         {'date': '2019-12-31', 'val': 10}
     ]
     rule = classes.LabelRule({
         'name': 'fast_dist',
         'val': 2,
         'change_type': 'GD',
         'duration': ['<', 4]
     })
     trendline = utils.analyze(values, line_cost)
     match = trendline.match_rule(rule)
     self.assertTrue(match is not None)
     self.assertEqual(match.onset_year, 2010)
     self.assertAlmostEqual(match.initial_val, 10.999999999)
     self.assertAlmostEqual(match.magnitude, 6.3999999999999)
     self.assertEqual(match.duration, 3)
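
Note that assertAlmostEqual checks round(a - b, 7) == 0 by default, which is why expected constants like 10.999999999 stand in for 11.0 here. A quick illustration:

import unittest

tc = unittest.TestCase()  # bare instantiation is allowed in Python 3
tc.assertAlmostEqual(10.999999999, 11.0)  # |diff| = 1e-9 rounds to 0 at 7 places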
Example #3
    def _search_video(self, titles, episode):
        best_candidate = (None, 0)

        matcher = difflib.SequenceMatcher()

        # Check over video files and propose our best candidate
        for (fullpath, filename) in utils.regex_find_videos('mkv|mp4|avi', self.config['searchdir']):
            # Use our analyze function to extract the title and episode from the filename
            (candidate_title, candidate_episode) = utils.analyze(filename)

            # Skip this file if we couldn't analyze it or it isn't the episode we want
            if not candidate_title or candidate_episode != episode:
                continue
            
            matcher.set_seq1(candidate_title.lower())

            # We remember to compare all titles (aliases and whatnot)
            for requested_title in titles:
                matcher.set_seq2(requested_title.lower())
                ratio = matcher.ratio()

                # Propose it as our new candidate if its ratio
                # clears the threshold and beats the best
                # we've seen so far
                if ratio > 0.7 and ratio > best_candidate[1]:
                    best_candidate = (fullpath, ratio)

        return best_candidate[0]
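
The 0.7 cutoff above is a threshold on difflib's ratio(), which returns a similarity score in [0, 1]. A small self-contained illustration with hypothetical titles:

import difflib

matcher = difflib.SequenceMatcher()
matcher.set_seq1('some show title')
matcher.set_seq2('some show title season 2')
# ratio() = 2 * matches / total chars = 2 * 15 / (15 + 24) ~= 0.77, above the 0.7 bar
print(matcher.ratio())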
Example #4
def result(request):
    url = request.POST.get('url')
    print "Getting file"
    cached = cache.get(url)
    if cached:
        response = cached
    else:
        try:
            response = requests.get(url=url)
            cache.set(url, response)
        except requests.RequestException:
            message = {"errorMessage": "Invalid URL"}
            return render(request, 'error.html', message)
    try:
        analyze(untar(response))
        return render(request, 'result.html')
    except RuntimeError as output:
        return render(request, 'error.html', {"errorMessage": output})
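
The view above is a cache-aside lookup. A sketch of the same pattern factored into a helper, assuming Django's cache API; the explicit timeout is an addition (the original relies on the backend default):

import requests
from django.core.cache import cache

def fetch_cached(url, timeout=300):
    """Return the response for url, fetching and caching it on a miss."""
    response = cache.get(url)
    if response is None:
        response = requests.get(url=url)
        cache.set(url, response, timeout)  # timeout in seconds
    return response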
Example #5
def main():

    src_info = {"latitude": 0.0, "longitude": 0.0, "depth_in_m": 10000.0}
    param = {"flag": True, "plot": True, "search_ratio": 0.3}
    path = construct_path_info()
    pprint(path)
    # get receiver weighting
    rec_weights, rec_wcounts, cat_wcounts = \
        get_receiver_weights(src_info, path, param)

    cat_weights = get_category_weights(cat_wcounts)

    src_weights = get_source_weights(cat_wcounts)

    weights = combine_receiver_and_category_weights(
        rec_weights, cat_weights)

    analyze(weights, rec_wcounts, cat_wcounts, src_weights)

    dump_weights(weights, path)
Example #6
    def assertAnalyzeEqual(self, line_cost, values, expected_out):
        for actual, expected in zip(list(utils.analyze(values, line_cost)), expected_out):
            self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))

            for actual_key, actual_value in actual.items():
                expected_value = expected[actual_key]

                if actual_key.startswith('eqn'):
                    np.testing.assert_almost_equal(actual_value, expected_value)
                else:
                    self.assertAlmostEqual(actual_value, expected_value)
Example #7
 def __init__(self, stocks, start, end, api=None):
     self.api = utils.set_API(
         ID="PKKHYTOT2ZF2VUS6YK0W",
         key="hXwa9ugbt1FzAYXCZTmSaAxaT5xSnHqyZ2pBkoN3")
     self.stocks = stocks
     self.start = start
     self.end = end
     self.results = utils.analyze(stocks=self.stocks,
                                  start=self.start,
                                  end=self.end,
                                  api=self.api)
     self.id_counter = 0
     self.totalProfit = dict()
     self.capital = 0
     self.recent_log = None
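
Hardcoding credentials as above is brittle; a common alternative (a sketch assuming utils.set_API keeps the same signature, with hypothetical environment variable names) is to read them from the environment:

import os
import utils

# Hypothetical environment variables holding the brokerage credentials.
api = utils.set_API(ID=os.environ['TRADING_API_ID'],
                    key=os.environ['TRADING_API_KEY'])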
Example #8
 def test_match(self):
     line_cost = 2
     target_date = utils.parse_date('2014-07-01')
     values = [
         {'date': '2010-12-31', 'val': 10},
         {'date': '2011-12-31', 'val': 10},
         {'date': '2012-12-31', 'val': 10},
         {'date': '2013-12-31', 'val': 5},
         {'date': '2014-12-31', 'val': 5},
         {'date': '2015-12-31', 'val': 5},
         {'date': '2016-12-31', 'val': 7},
         {'date': '2017-12-31', 'val': 9},
         {'date': '2018-12-31', 'val': 10},
         {'date': '2019-12-31', 'val': 10}
     ]
     rule = classes.LabelRule({
         'name': 'fast_dist',
         'val': 2,
         'change_type': 'GD',
         'duration': ['<', 4]
     })
     trendline = utils.analyze(values, line_cost, target_date)
     match = trendline.match_rule(rule)
     self.assertTrue(match is not None)
     self.assertEqual(match.onset_year, 2010)
     self.assertAlmostEqual(match.initial_val, 10.999999999)
     self.assertAlmostEqual(match.magnitude, 6.3999999999999)
     self.assertEqual(match.duration, 3)
Example #9
    def track_process(self):
        if self.playing:
            # Don't do anything if the engine is busy playing a file
            return (1, None)
        
        filename = utils.get_playing_file(self.config['tracker_process'], self.config['searchdir'])
        
        if filename:
            if filename == self.last_filename:
                # Same filename as before; no need to process it again
                return (4, self.last_show_tuple)
            
            self.last_filename = filename

            # Run a regex on the filename to extract
            # the show title and episode number
            (show_title, show_ep) = utils.analyze(filename)
            if not show_title:
                return (2, None) # Format not recognized
            
            # Use difflib to see if the show title is similar to
            # one we have in the list
            highest_ratio = (None, 0)
            matcher = difflib.SequenceMatcher()
            matcher.set_seq1(show_title.lower())
            
            # Compare to every show in our list to see which one
            # has the most similar name
            for show in self.get_list():
                titles = self.get_show_titles(show)
                # Make sure to search through all the aliases
                for title in titles:
                    matcher.set_seq2(title.lower())
                    ratio = matcher.ratio()
                    if ratio > highest_ratio[1]:
                        highest_ratio = (show, ratio)
            
            playing_show = highest_ratio[0]
            if highest_ratio[1] > 0.7:
                return (0, (playing_show, show_ep))
            else:
                return (3, None) # Show not in list
        else:
            self.last_filename = None
            return (1, None) # Not playing
Example #10
    def analysis_reducer(self, point_wkt, pix_datas):
        """
        Given a point wkt and a list of pix datas in the format:
        [
            {'date': '2011-09-01', 'val': 160.0},
            {'date': '2012-09-01', 'val': 180.0},
            ...
        ]
        perform the landtrendr analysis and change labeling.

        Yields out the change labels and trendline data for the given point
        """
        sys.stdout.write('.')  # for viewing progress
        sys.stdout.flush()

        job = os.environ.get('LT_JOB')
        settings = utils.get_settings(job)

        pix_datas = list(pix_datas)  # save iterator to a list
        pix_trendline = utils.analyze(
            pix_datas,
            settings['line_cost'],
            utils.parse_date(settings['target_date'])
        )

        # write out pix trendline
        for label, val in pix_trendline.mr_label_output().items():
            # prepend 'trendline/' to the label name so it's written to a subfolder
            yield (
                'trendline/%s' % label,
                {'pix_ctr_wkt': point_wkt, 'value': val}
            )

        label_rules = [
            classes.LabelRule(lr) for lr in settings['label_rules']
        ]

        change_labels = utils.change_labeling(pix_trendline, label_rules)

        # write out change labels
        for label_name, data in change_labels.items():
            for key in ['class_val', 'onset_year', 'magnitude', 'duration']:
                label_key = '%s_%s' % (label_name, key)
                yield label_key, {'pix_ctr_wkt': point_wkt, 'value': data[key]}
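
For context, a sketch of driving such a reducer for a single pixel (the job instance and the WKT are hypothetical; the real pipeline runs under a MapReduce driver with LT_JOB set):

# Hypothetical driver: collect the reducer's (key, value) pairs for one pixel.
pix_datas = [
    {'date': '2011-09-01', 'val': 160.0},
    {'date': '2012-09-01', 'val': 180.0},
    {'date': '2013-09-01', 'val': 140.0},
]
for key, value in job.analysis_reducer('POINT (0 0)', iter(pix_datas)):
    print(key, value)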
Example #11
 def track_process(self):
     if self.playing:
         # Don't do anything if the engine is busy playing a file
         return None
     
     filename = self._playing_file(self.config['tracker_process'], self.config['searchdir'])
     
     if filename:
         # Run a regex on the filename to extract
         # the show title and episode number
         (show_title, show_ep) = utils.analyze(filename)
         if not show_title:
             self.msg.warn(self.name, 'Regex error. Check logs.')
             utils.log_error("[Regex error] Tracker: %s / Dir: %s / Processed filename: %s\n"
                             % (self.config['tracker_process'], self.config['searchdir'], filename))
             return None
         
         # Use difflib to see if the show title is similar to
         # one we have in the list
         highest_ratio = (None, 0)
         matcher = difflib.SequenceMatcher()
         matcher.set_seq1(show_title.lower())
         
         # Compare to every show in our list to see which one
         # has the most similar name
         for show in self.get_list():
             titles = self.get_show_titles(show)
             # Make sure to search through all the aliases
             for title in titles:
                 matcher.set_seq2(title.lower())
                 ratio = matcher.ratio()
                 if ratio > highest_ratio[1]:
                     highest_ratio = (show, ratio)
         
         playing_show = highest_ratio[0]
         if highest_ratio[1] > 0.7:
             return (playing_show, show_ep)
         else:
             self.msg.warn(self.name, 'Found player but show not in list.')
     
     return None
Example #12
    def calc(sbl_x_thres_min, sbl_x_thres_max, hls_s_thres_min,
             hls_s_thres_max):

        for video_in in videos_in:
            file, ext = video_in.split('/')[-1].split('.')
            logdir = logbase + file
            log = utl.log(logdir)
            name = str(sbl_x_thres_min) + '_' + str(sbl_x_thres_max) + '_' + \
                str(hls_s_thres_min) + '_' + str(hls_s_thres_max)
            video_out = '../../out/' + file + '_' + name + '.' + ext

            print("From =>", video_in, "To =>", video_out)
            fl = ll.find_lane_lines(mtx, dist, src, dst, M, Minv,
                                    sbl_x_thres_min, sbl_x_thres_max,
                                    hls_s_thres_min, hls_s_thres_max, log)

            clip2 = VideoFileClip(video_in)
            clip = clip2.fl_image(fl.fll)
            clip.write_videofile(video_out, audio=False)
            analyze = utl.analyze(log.lf, log.rf, log.lc, log.rc,
                                  log.left_base_undist, log.right_base_undist,
                                  log.ofset)
            analyze.plot1()
Example #13
def lane_line(execute):
    if execute:
        mtx, dist = utl.distort_load(utl.fn.pickle_file)
        src, dst = ll.perspective_transform_values()
        M = ll.perspective_transform_map(src, dst)
        Minv = ll.perspective_transform_map(dst, src)

        combinations = config.get_combinations(1)
        log = utl.log("../../project/img.log")
        utl_fn = utl.fn()
        for combo in combinations:
            sbl_x_thres_min, sbl_x_thres_max, hls_s_thres_min, hls_s_thres_max = combo
            fl = ll.find_lane_lines(mtx, dist, src, dst, M, Minv,
                                    sbl_x_thres_min, sbl_x_thres_max,
                                    hls_s_thres_min, hls_s_thres_max, log)
            for imgfn in utl.fn.testset2:
                img = cv2.imread(imgfn)
                print(imgfn, sbl_x_thres_min, sbl_x_thres_max, hls_s_thres_min,
                      hls_s_thres_max)
                iout = fl.fll(img)
                cv2.imwrite(utl_fn.output(imgfn), iout)
        analyze = utl.analyze(log.lf, log.rf, log.lc, log.rc,
                              log.left_base_undist, log.right_base_undist,
                              log.ofset)
Example #14
"""
@author: Hugh Krogh-Freeman
"""
import utils
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
import sys
from datetime import datetime

print(datetime.now())
train_filename = sys.argv[1]
test_filename = sys.argv[2]

'''Best Model: Naive Bayes using unigrams'''
utils.classify(train_filename, test_filename)
utils.analyze('model.pkl', 'test.pkl')

'''get data for training and testing'''
train, y_train = utils.get_data(train_filename)
test, y_test = utils.get_data(test_filename)

'''Naive Bayes: unigrams'''
print('\n\n~~~ Naive Bayes model using unigrams ~~~')
utils.doit(train, y_train, test, y_test, MultinomialNB(alpha=0.2), 1)

'''SVM: bigrams'''
print('\n\n~~~ Linear SVM model using bigrams ~~~')
svc = LinearSVC(dual=False, penalty='l2', C=1.0, loss='squared_hinge')
utils.doit(train, y_train, test, y_test, svc, 2)

'''logistic regression: trigrams'''
Example #15
def get_analytics():
    analytics = utils.analyze(pipe, t, e, utils.categories(test))
    # console.log('\n'+str(analytics))
    return analytics
Example #16
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 24 14:00:05 2017

@author: Hugh Krogh-Freeman
"""
import utils
import sys

model_filename, vectorized_test_data_filename = sys.argv[1], sys.argv[2]
utils.analyze(model_filename, vectorized_test_data_filename)
Example #17
import sys
import os
from board import TosBoard
import utils
import config as cfg

# get board object
initBoard = TosBoard()
# init
initBoard.randomInitialized()
if cfg.inputFileName is not None:
    if cfg.inputFileName.endswith("txt"):
        initBoard.initFromFile(cfg.inputFileName)
    else:
        initBoard.initFromScreenshot(cfg.inputFileName)
# test evaluation
stones, boundary, combo, end = initBoard.evaluate()
# start searching
bestBoard, finalMoveList = utils.analyze(initBoard)
# evaluate and visualization
stones, boundary, combo, end = bestBoard.evaluate()
utils.visualizePath(initBoard, bestBoard, finalMoveList)
# dump result to txt file
with open(os.path.join(cfg.outputDir, "output.txt"), "w") as fout:
    fout.write("startRowIdx={}\n".format(initBoard.currentPosition[0]))
    fout.write("startColIdx={}\n".format(initBoard.currentPosition[1]))
    for move in finalMoveList:
        fout.write("{} ".format(move))
    fout.write("\nstones={}\ncombo={}\nsteps={}\n".format(
        stones, combo, len(finalMoveList)))
Example #18
def search_response(request):
    query_string = request.GET['searchField']
    tweets = search_tweets(query_string)
    analyzed_tweets = analyze(tweets)
    return render_to_response("response.html", context_instance=RequestContext(request, {'tweets': analyzed_tweets}))