Example #1
    def analyze_pairs(self):
        # Analyze pairs.
        # This is a very basic algorithm that only makes use of geography.
        # TODO: update the algorithm to incorporate more than geography.
        analyze = Analyze(self.users)
        analyze.analyze_easy_potentials()
        match_results = analyze.get_best_matches()
        potential_pairs = analyze.get_potential_pairs()  # computed but not yet used

        return match_results
Example #2
    def __init__(self, analysis: Analyze):
        """ Analyze constructor

            :param analysis: Analize object after processing
        """
        self.logger = logger.Logger(name=__name__, log_level=logger.INFO)
        self.my_logger = self.logger.logger
        self.my_logger.info('INIT')

        self.analysis = analysis
        self.headings = TheConfig.headings  #: users headings configuration
        self.menubar_spec = TheConfig.menubar  #: user menubar configuration
        self.menubar_data = analysis.menubar()  #: menubar scanned data
        self.output = [TheConfig.HEADER_HTML]  #: start with bookmarks file header
        self.indent = 0  #: level to indent

        #: date stamp for all html entities we create
        self.datestamp = int(datetime.datetime.now().timestamp())

        # create output structure
        self.write(TheConfig.LIST_HTML)
        self.write_toolbar_heading('Bookmarks Bar')
        self.begin_list()
        self.write_section('head')
        for section in self.headings:
            self.write_section(section)
        self.write_section('tail')
        self.end_list()
Example #3
def results():
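    # Flask view: pull the submitted address from the form, extract data from it,
    # and either render the results page or redirect home with an error flag.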
    link = request.form['address']
    des, dia = Process(link)
    if len(des) == 0 or len(dia) == 0:
        error = True
        return redirect(url_for('home', error=error))
    timestamp = Analyze(des, dia)
    print(timestamp)
    return render_template("results.html", timestamp=timestamp)
Example #4
    def analyze(self):
        self.ProgressVar.set(0)
        if self.SPathUnknown is None or self.SInstrument is None or (
                self.saveVar.get() == 1 and self.SPathForSaving is None):
            # Polish: "Oops! An error occurred." / "No file path given or no instrument selected!"
            messagebox.showerror(
                'Ojej! Wystąpił błąd.',
                'Nie podano ścieżki do plików bądź nie wybrano instrumentu!')
        elif self.restartVar is None:
            action = Analyze(self.SInstrument,
                             self.SPathUnknown,
                             callback=self.results,
                             callback_progress=self.progress_bar)
            action.start()
            self.restartVar = self.SInstrument  # was '==', a no-op comparison
        else:
            # Drop the cached MFCC file from the previous run before re-analyzing.
            if os.path.exists('mfcc/mfccs_unknown_' + self.restartVar + '.p'):
                os.remove('mfcc/mfccs_unknown_' + self.restartVar + '.p')
            action = Analyze(self.SInstrument,
                             self.SPathUnknown,
                             callback=self.results,
                             callback_progress=self.progress_bar)
            action.start()
Example #5
from analyze import Vector, Analyze

# Build the initial category vector from the "poi" and "categories" collections.
vector = Vector()
vector.set_corpus(collection_name="poi", key="Categories")
vector.set_initial_vector(collection_name="categories", key="Name")

analyze = Analyze()
Example #6
def makePlotFor(param):
    allData = []
    for dset in datasets:
        print('-------------------------------')
        print('Extracting from', dset)
        path = dpaths[dset]

        print('ANALYZING', path)

        stack_names = sorted([
            name for name in listdir(path)
            if not name.lower().count("unusable")
            and not name.lower().count("ignore")
            and isdir(pjoin(path, name))
        ])

        # print('Stacks', stack_names)

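        # Plot specification per parameter: plot type, entities to group by,
        # y-axis limits, y-axis label, and title.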
        specs = {
            'LEG_BODY_ANGLE': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (-220, 220),
                'YLABEL': 'Degrees',
                'TITLE': 'Leg-Body Angle'
            },
            'SWING_AMPLITUDE': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (0, 2500),
                'YLABEL': 'um',
                'TITLE': 'Swing Amplitude'
            },
            'SWING_DURATION': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (0, 200),
                'YLABEL': 'ms',
                'TITLE': 'Swing Duration'
            },
            'STANCE_AMPLITUDE': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (0, 2500),
                'YLABEL': 'um',
                'TITLE': 'Stance Amplitude'
            },
            'STANCE_DURATION': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (0, 200),
                'YLABEL': 'ms',
                'TITLE': 'Stance Duration'
            },
            'AEPx': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (-5000, 5000),
                'YLABEL': 'um',
                'TITLE': 'Anterior Extreme Position w.r.t. X'
            },
            'PEPx': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (-5000, 5000),
                'YLABEL': 'um',
                'TITLE': 'Posterior Extreme Position w.r.t. X'
            },
            'AEA': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (-220, 220),
                'YLABEL': 'Degrees',
                'TITLE': 'Anterior Extreme Angle'
            },
            'PEA': {
                'PLOT': 'BOX',
                'ENTITIES': legs,
                'YLIMS': (-220, 220),
                'YLABEL': 'Degrees',
                'TITLE': 'Posterior Extreme Angle'
            },
            'CCI': {
                'PLOT': 'BOX',
                'ENTITIES': contralateral_pairs,
                'YLIMS': (0.75, 1.02),
                'YLABEL': '/s',
                'TITLE': 'Contra-lateral Coordination Index'
            },
            'ICI': {
                'PLOT': 'BOX',
                'ENTITIES': ipsilateral_pairs,
                'YLIMS': (0.75, 1.02),
                'YLABEL': '/s',
                'TITLE': 'Ipsi-lateral Coordination Index'
            },
            'WALK_SPEED': {
                'PLOT': 'BOX',
                'ENTITIES': ['Speed'],
                'YLIMS': (0, 50),
                'YLABEL': 'mm/s',
                'TITLE': 'Average Walking Speed'
            },
            'STOLEN_SWINGS': {
                'PLOT': 'BOX',
                'ENTITIES': ['Swings/cycle'],
                'YLIMS': (0, 1.2),
                'YLABEL': '#/cycle',
                'TITLE': 'Stolen Swings Per Cycle'
            },
            'CONCURRENCY': {
                'PLOT': 'PIE',
                'ENTITIES': concurrency_states,
                'YLIMS': (0, 100.0),
                'YLABEL': '%',
                'TITLE': 'Proportional Concurrency States\n%'
            },
        }

        holder = [[] for k in range(len(specs[param]['ENTITIES']))]
        bucket = dict(zip(specs[param]['ENTITIES'], holder))

        for stack_name in stack_names:
            print "===Stack Name ==", stack_name, "====="
            ana = Analyze(pjoin(path, stack_name))

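            # Map each parameter name to the Analyze accessor that extracts it.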
            funcky = {
                # legs
                'LEG_BODY_ANGLE': ana.getLegBodyAngles,
                'SWING_AMPLITUDE': ana.getSwingAmplitude,
                'SWING_DURATION': ana.getSwingDuration,
                'STANCE_AMPLITUDE': ana.getStanceAmplitude,
                'STANCE_DURATION': ana.getStanceDuration,
                'AEPx': ana.getAEPx,
                'PEPx': ana.getPEPx,
                'AEA': ana.getAEA,
                'PEA': ana.getPEA,

                # contralateral_pairs
                'CCI': ana.getCCI,

                # ipsi_lateral_pairs
                'ICI': ana.getICI,
                'WALK_SPEED': ana.getWalkingSpeed,
                'STOLEN_SWINGS': ana.getStolenSwings,
                'CONCURRENCY': ana.getConcurrency
            }

            bucket = filledBucket(bucket, specs[param]['ENTITIES'],
                                  funcky[param])
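            # Note: the CSV is rewritten after every stack with the cumulative bucket so far.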
            csvDic = convertDicToListForCsv(bucket)
            csvfname = path + '_' + param + '_dict.csv'
            with open(csvfname, 'w') as f:
                w = csv.writer(f)
                w.writerows(csvDic)
        allData.append([param, bucket])
    return allData
Example #7
from dash.dependencies import Input, Output
from analyze import Analyze
import dash
import dash_core_components as dcc
import dash_html_components as html
import json


analyze_run = Analyze()
data = analyze_run.read()
print(analyze_run.meta)

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Graph(
        id='basic-interactions',
        figure={
            'data': [
                {
                    'x': analyze_run.dataset["FSC-A"].values,
                    'y': analyze_run.dataset["SSC-A"].values,
                    'mode': 'markers',
                },
            ]
        }
    ),

    html.Div([
        dcc.Markdown("""
            **Hover Data**
Example #8
    def analyze(self, image):
        return Analyze(image, self.bands()).apply()
Example #9
    def do_crawl(self):
        """
        执行爬取
        """
        echo = Echo()
        url_count = 0
        url_num = len(self.url.url)
        while url_count < url_num:
            data_count = 0
            url_dict = self.url.url[url_count]
            req = requests.Request(method=url_dict['method'], url=url_dict['value'])

            # Add cookies, data and headers
            if self.parse.cookie_range[2] >= url_count + 1 >= self.parse.cookie_range[1]:
                req.cookies = self.cookie
            if len(url_dict['data']) > data_count:
                req.data = url_dict['data'][data_count]
                data_count += 1
            for ele in url_dict['addparam']:
                try:
                    req.data[ele[1]] = self.url.url[int(ele[0])-1]['text'][0]
                except IndexError:
                    print("Error! No additional param found.")

            if len(self.header.header) > 0:
                req.headers = self.header.header

            s = requests.Session()
            prepped = s.prepare_request(req)
            self.resp = s.send(prepped, stream=True)
            if self.resp.status_code == 200:
                if url_count+1 == self.parse.cookie_range[0]:
                    self.cookie = self.resp.cookies
            else:
                print "status_code:{0}".format(self.resp.status_code)


            # Data analysis
            if url_dict['type'] == 't':
                analyze = Analyze()
                text_list = analyze.analyze(self.resp.text, url_dict['re'])
                self.url.url[url_count]['text'] = text_list


            # Output results
            if url_dict['type'] == 't':
                pretext = url_dict['value'] + '\n' + ''.join(str(s) + '\n' for s in url_dict['data']) + '\n\n'
                self.text = ''.join(s + '\n' for s in text_list)
            elif url_dict['type'] == 'b':
                m = url_dict['value'].rfind('/')
                pretext = url_dict['value'][m+1:]
                self.text = self.resp.iter_content(chunk_size=128)
            else:
                raise ValueError('[Type] name not found')
            if self.parse.echo_method != 'xls':
                echo.echo(self.parse.echo_method, pretext, self.text, url_dict['type'], url_count, data_count)
            else:
                echo.echo_to_xls(self.text, self.parse.xls_list[0], self.parse.xls_list[1], self.parse.xls_list[2], self.parse.xls_title)

            if data_count == len(url_dict['data']):
                url_count += 1
Example #10
    parser.add_option("-s", "--style", dest="style", type = int, 
                      help="The color style set", default = 0)

    parser.add_option("-t", "--threshold", dest="thresh",
                      help="The bootstrap threshold")
    parser.add_option("-x", "--modelCond", dest="modelCond", default = None,
                      help="The model condition that the occupancy map will be plotted for")
    parser.add_option("-y", dest="newModel", default = None,
		      help="The new order for model conditions")
    parser.add_option("-w", dest="newOrder", default = None,
		      help="The new order for clades")
    parser.add_option("-k", "--missing", dest="missing", default = 0,
		      help="The missing data handling flag. If this flag set to one, clades with partially missing taxa are considered as complete.")
    
    parser.add_option("-o", "--output", dest="label", default = None,

		      help="name of the output folder for the relative frequency analysis. If you are using the docker it should start with '/data'.")
    parser.add_option("-g", "--outgroup", dest="outg", default = None,
		      help="Name of the outgroup for the hypothesis in relative frequency analysis specified in the annotation file, eg. Outgroup or Base.")
    opt = Opt(parser)

    analyzer = Analyze(opt)
    try:
    	analyzer.analyze()
    except ValueError:
    	print("analysis failed!")
    	raise
    	sys.exit(1)

Example #11
    def bt3_clicked(self):
        # Hide the parent form, run the Analyze dialog modally, then restore the form.
        self.form.hide()
        ui = Analyze()
        ui.show()
        ui.exec_()
        self.form.show()
Example #12
def result():
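    # Flask view: compare the submitted resume with the job description and render the metrics.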
    resume = request.form['resume']
    jobdesc = request.form['jobdesc']
    analysis = Analyze(resume, jobdesc)
    results = analysis.metrics()
    return render_template('result.html', title="Results", results=results)
Example #13
def main():
    print('This program analyzes your Google Fit data.')
    print(
        "Make sure you have the file 'Daily Summaries.csv' in the same directory."
    )
    user_input = input('Do you want to start? Y/N \n')
    if user_input in ('Y', 'y'):  # was "== 'Y' or 'y'", which is always true

        print('Type (enter number):\n')
        print('1 - Speed')
        print('2 - Activities time')
        print('3 - Steps')

        user_input = input()
        user_date = input('Please enter date in format YYYY-MM-DD\n')
        validate_date(user_date)

        if user_input == '1':
            graph_speed = Analyze(user_date)
            graph_speed.speed()

        elif user_input == '2':
            bar_time = Analyze(user_date)
            bar_time.activity_time()

        elif user_input == '3':
            step_bar = Analyze(user_date)
            step_bar.steps()

    else:
        exit()  # was a bare "exit", which is a no-op
Example #14
# One accumulator per metric, keyed by leg or leg-pair name.
swing_amplitudes = dict(zip(legs, [[] for k in range(6)]))  # used below; missing from the original snippet
pepxs = dict(zip(legs, [[] for k in range(6)]))
ccis = dict(zip(contralateral_pairs, [[] for k in range(3)]))
icis = dict(zip(ipsilateral_pairs, [[] for k in range(4)]))
lbas = dict(zip(legs, [[] for k in range(6)]))
speeds = {'Walk Speeds': []}
aeas = dict(zip(legs, [[] for k in range(6)]))
peas = dict(zip(legs, [[] for k in range(6)]))
concurrencies = {'concurrencies': []}

for stack_name in stack_names:
    # Init the Analyzer
    #------------------
    stack_path = pjoin(path, stack_name)
    print('Analysing', stack_path)
    print('----------------------------------------------------------------')
    ana = Analyze(stack_path)

    # Generate Gait Diagram
    #----------------------
    ana.genGaitDiagram()

    # Param 1.
    print('Swing Amplitude')
    #-----------------------
    # Unit of Observation : One Swing Event
    for leg in legs:
        swing_amplitudes[leg].extend(ana.getSwingAmplitude(leg))
    print()

    # '''
    # 2
Example #15
    def analyze(self, image):
        return Analyze(image, self.bands(), self.mosaic_spec).apply()
Example #16
from analyze import Analyze
# import codecs
# text = codecs.open('D:\PyCharm Community Edition 2016.3.2\DeeCamp\\NLPpart\data\CarReport\CarReport_122.txt', 'r', 'gb18030').read()

ana = Analyze()
# ana = ana.init()
# print(1)
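# Expose the Analyze results: keywords, pos (part-of-speech) tags, and cws (word segmentation).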
keywords = ana.keywords
pos = ana.pos
cws = ana.cws
Example #17
         num_test_samples) = load_anomaly_data()
    else:
        (x_train, num_train_samples, x_test, y_test,
         num_test_samples) = load_data()

    if args.backbone == "vgg":
        model = VGG16(include_top=False, weights='imagenet')
        #model_vgg = Model(input = base_vgg_model.input, output = base_vgg_model.get_layer('block4_pool').output)
    else:
        model = ResNet50(weights='imagenet', include_top=False)

    if args.task == "anomaly":
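        # Extract CNN features and flatten each sample to a 1-D vector for Analyze.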
        features_train_array = model.predict(x_train)
        features_train_array = features_train_array.reshape(
            num_train_samples, -1)  #reshape to 2d from 4d array

        features_test_array = model.predict(x_test)
        features_test_array = features_test_array.reshape(num_test_samples, -1)
        # print('test array shape: ',features_test_array.shape)
        # print('train array shape: ',features_train_array.shape)
        Analyze(features_train_array, features_test_array, y_test)

    if args.task == "cluster":
        features_train_array = model.predict(x_train)
        features_train_array = features_train_array.reshape(
            num_train_samples, -1)  #reshape to 2d from 4d array

        #features_test_array = model.predict(x_test)
        #features_test_array = features_test_array.reshape(num_test_samples, -1)
        Cluster(features_train_array, num_train_samples)
Example #18
        elif "syslog" in f:
            mapreduce_app_logs.append(f)
        else:
            pass
    # initialize parser
    yarn_parser = YarnParser(rm_log, nm_logs, spark_app_logs, mapreduce_app_logs)
    # parse logs
    yarn_parser.rm_parse()
    yarn_parser.nm_parse()
    yarn_parser.spark_parse()
    yarn_parser.mapreduce_parse()
    # sort by time
    yarn_parser.sort_by_time()
    tapps = yarn_parser.get_apps()
    # keep only successfully completed apps
    apps = Analyze.success_apps(tapps)
    # apps = tapps
    # do analysis
    total_delays = Analyze.total_delay(apps)
    persist_map(output_dir + "/total", total_delays)

    # in_delays = Analyze.in_application_delays(apps)
    # persist_map(output_dir + "/in", in_delays)

    out_delays = Analyze.out_application_delays(apps)
    persist_map(output_dir + "/out", out_delays)

    am_delays = Analyze.am_delay(apps)
    persist_map(output_dir + "/am", am_delays)

    c1_delays = Analyze.c1_delay(apps)
Example #19
from random import choice

from analyze import Analyze
from tools.args_parser import command_line_arguments
from tools.create_loggger import create_console_logger_handler

if __name__ == '__main__':
    args = command_line_arguments()
    create_console_logger_handler()
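    # Run the stress workload on a randomly chosen number of threads, then analyze it.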
    analyze = Analyze(num_of_threads=choice(range(2, 10)),
                      max_stress_duration=args.stress_duration)
    analyze.run()
    analyze.analyze()
Example #20
    if args.backbone == "vgg":
        model = VGG16(include_top=False, weights='imagenet')
        #model_vgg = Model(input = base_vgg_model.input, output = base_vgg_model.get_layer('block4_pool').output)
    else:
        model = ResNet50(weights='imagenet', include_top=False)

    if args.task == "anomaly":
        features_train_array = model.predict(x_train)
        features_train_array = features_train_array.reshape(
            num_train_samples, -1)  #reshape to 2d from 4d array

        features_test_array = model.predict(x_test)
        features_test_array = features_test_array.reshape(num_test_samples, -1)
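        # Default to an SVC; switch to IsolationForest for the outlier subtask.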
        estimator_str = "svc"
        if args.subtask == "outlier":
            estimator_str = "isolationforest"

        #estimator_params = GridSearch(features_train_array,y_train,estimator_str)

        Analyze(args, estimator_str, features_train_array, y_train,
                features_test_array, y_test, testoutputfile)

    if args.task == "cluster":
        features_train_array = model.predict(x_train)
        features_train_array_np = np.array(features_train_array)
        features_train_array_list.append(features_train_array_np.flatten())
        features_train_array_list_np = np.array(features_train_array_list)

        Cluster(features_train_array, num_train_samples)
Example #21
    # open bookmarks file and feed to the parser
    bookmarks = None
    try:
        my_logger.info(f'Processing input file: {TheConfig.input_file}')
        with open(TheConfig.input_file, mode='r', encoding='utf-8') as html:
            bookmarks_html = html.read()
        html_parser.feed(bookmarks_html)
        bookmarks = html_parser.parser.bookmarks
    except Exception as e:
        my_logger.exception(f'Exception parsing file: {e}', exc_info=e)

    # analyze bookmarks just parsed
    analysis = None
    try:
        analysis = Analyze(bookmarks.bookmarks)
    except Exception as e:
        my_logger.exception(f'Exception analyzing file: {e}', exc_info=e)

    # create bookmark output structure
    output = None
    try:
        output = Reformat(analysis).output
        my_logger.info(f'Creating output file: {TheConfig.output_file}')
        with open(TheConfig.output_file, 'w', encoding='utf-8') as file:
            for s in output:
                if isinstance(s, list):
                    for s_ in s:
                        file.write(s_ + '\n')
                else:
                    file.write(s + '\n')
Example #22
from analyze import Analyze
import argparse

# ap = argparse.ArgumentParser()
# ap.add_argument("-f", "--folder")
# opts = ap.parse_args()

run = Analyze()
run.read()
files = run.files


def indexer():
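    # Write one index line per FCS file: its filename ($FIL) and acquisition date ($DATE).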
    with open("FACS_INDEX.txt", "w") as file:
        for i in files:
            run.read(i)
            meta = run.meta
            str_to_save = f"File: {meta['$FIL']},Date: {meta['$DATE']},\n"
            file.write(str_to_save)


indexer()