Example #1
 def analysis(self):
     QMessageBox.about(self, 'Notice', "The analysis will take a while")
     self.text.clear()
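     # run the module-level analysis; the dialog above warns this can take a while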
     analysis.analysis()
     self.log_browser.append('<font color="red">Analysis finished</font>')
     self.job_analysis_menu.setEnabled(True)
     self.company_analysis_menu.setEnabled(True)
Example #2
def foreach_decompose(file_num):
    f_filename = flst[file_num]
    ffile = read_hdf(f_filename)
    if adjust_ops == 'subrange':
        ffile.data = adjust.subrange(ffile.data, **adjust_ops.keywords)
    if rebin:
        ffile.data = analysis.rebin(ffile.data, fac=rebin)
    # central difference for interior, forward and backward at the boundaries
    grad = np.gradient(ffile.data, axis=pdim)
    if dfdp:
        h5_output.data = grad
    else:
        pf = - paxis * ffile.data
        # zeros occur because: 1. the origin is included in the axis points; 2. the tail of the distribution is zero
        pf[pf == 0.] = 999999.0
        h5_output.data = np.divide(grad, pf)
    if mininp:
        h5_output.data = adjust.subrange_phys(h5_output.data, bound=mininp,
                                              axis=pdim, axesdata=tmp_axis, update_axis=False)
        tmp = h5_output.data.copy()
        h5_output.data = analysis.analysis(h5_output.data, [getmin])
    h5_output.run_attributes['TIME'][0] = ffile.run_attributes['TIME'][0]
    h5_output.run_attributes['ITER'][0] = ffile.run_attributes['ITER'][0]
    newname = outdir + os.path.basename(f_filename)
    write_hdf(h5_output, newname)
    if mininp:
        h5_output.data = pax[analysis.analysis(tmp, [min_index])]
        write_hdf(h5_output, outdir + inddir + os.path.basename(f_filename))
    return f_filename
Example #3
def master(release, user, pword, host, port):

  ## Get all human protein-coding genes from Ensembl and a count of all unique domains.
  import os
  import pfamDomains
  import mapPfamDomains
  import pdbChembl
  import uniprotChembl
  import analysis

  # Set the threshold for ligand binding to 50 micromolar
  th = 50
  
  # Get Uniprot identifiers for all human proteins.
  os.system("R CMD BATCH --vanilla queryBioMaRt.R")
  # Map Pfam domains and positions to all Uniprot identifiers.
  pfamDomains.pfamDomains(release, user, pword, host, port)
  # Map small molecule binding to Pfam domains.
  mapPfamDomains.mapPDs(th, release, user, pword, host, port)
  # Get all ChEMBL interactions in PDB and binding site residues.
  pdbDict = pdbChembl.query(release, user, pword, host, port)
  # Get all ChEMBL interactions in Uniprot and binding site annotation.
  uniprotDict = uniprotChembl.query(release, user, pword, host, port)
  # Analyze the data.
  analysis.analysis(th, release, user, pword, host, port)
Example #4
 def analysis(self):
     df = readTemplate()
     if (dbOrExcel == db):
         res = query('select * from t_sale_info')
         for rec in res:
             row = [rec[1], rec[2], rec[3], rec[4], rec[6], rec[5]]
             df.loc[len(df)] = row
     else:
         df = common_merge_excel()
     # Analyze
     analysis(df)
Example #5
def analyze(filepath, filename):
    # TXT to CSV
    analysis(os.path.join(app.config["UPLOAD_FOLDER"], filename), filename)

    os.remove(os.path.join(app.config["UPLOAD_FOLDER"], filename))

    # Read CSV
    df = pd.read_csv(os.path.join("csvs", filename))

    # functions to get the facts/information about the data
    contacts = np.unique(df["Contacts"]).shape[0]
    pie(filename)
    most(filename)
    word(filename)
    week(filename)
    msgs = number_of_msgs(filename)
    member = number_of_unique_members(filename)
    sdate = start_date(filename)
    edate = end_date(filename)
    avg = average_length_msg(filename)[:4]
    maxl, name = max_length_msg(filename)
    month = month_busy(filename)
    day = weekday_busy(filename)

    # one of two dashboard templates is rendered, based on the number of contacts in the dataset
    if contacts > 5:
        least(filename)
        return render_template(
            "dash2.html",
            filename=filename,
            msgs=msgs,
            member=member,
            sdate=sdate,
            edate=edate,
            day=day,
            avg=avg,
            maxl=maxl,
            name=name,
            month=month,
        )
    else:
        return render_template(
            "dash1.html",
            filename=filename,
            msgs=msgs,
            member=member,
            sdate=sdate,
            edate=edate,
            day=day,
            avg=avg,
            maxl=maxl,
            name=name,
            month=month,
        )
Example #6
def fetch(driver):
    print("----- Fetching -----")
    driver.get("https://roobet.com/crash")
    driver.set_window_size(1920, 1200)
    time.sleep(5)
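    # send three TABs and an ENTER to the page body, presumably to dismiss an initial overlay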
    body = driver.find_element(By.TAG_NAME, 'body')
    body.send_keys(Keys.TAB)
    body.send_keys(Keys.TAB)
    body.send_keys(Keys.TAB)
    body.send_keys(Keys.ENTER)
    actions = ActionChains(driver)
    #actions.move_by_offset( 795, 718).click().perform()
    a = driver.find_element(By.CSS_SELECTOR, ".tick_2dJyV:nth-child(1)").text
    new_csv()
    df = pd.DataFrame([[a, pd.Timestamp.now()]], columns=["Crash Point", "datetime"])
    df.to_csv("scraped.csv",  mode='a', header=False)

    anal = analysis("scraped.csv")
    while True:
        b = driver.find_element(By.CSS_SELECTOR, ".tick_2dJyV:nth-child(1)").text
        if b != a:
            a = b
            print("---- Last Crash: " + str(b))
            bdf = pd.DataFrame([[b, pd.Timestamp.now()]], columns=["Crash Point", "datetime"])
            bdf.to_csv("scraped.csv", mode='a', header=False)
            b = b[:-1]
            b = float(b)
            anal.main(b, 1.5)
Example #7
def run_simulation(gas_type, pressure, num_grid, grid_distance,
                   scint_distance):
    global counter
    file = open('output_raw.log', 'a+')

    make_json_file(gas_type, pressure, num_grid, grid_distance, scint_distance)

    cmd = '../build/mmSim config.json -t 6'
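    # launch the external simulator; the -t 6 flag is assumed to set the worker-thread count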
    os.system(cmd)

    silhouette_score, efficiency = analysis('sim.root', 'output.root')

    dir_name = 'output/' + str(counter)

    try:
        os.mkdir(dir_name)
    except FileExistsError:
        pass

    shutil.move('config.json', str(dir_name) + '/config.json')
    # shutil.move('sim.root', str(dir_name) + '/sim.root')
    shutil.move('output.root', str(dir_name) + '/output.root')

    try:
        os.remove('sim.root')
    except FileNotFoundError:
        pass

    file.write('%s %s %s %s %s %s %s\n' %
               (gas_type, pressure, num_grid, grid_distance, scint_distance,
                silhouette_score, efficiency))
    file.close()

    counter += 1
    return silhouette_score, efficiency
Example #8
    def plot_contour(self, file1, file2, file_num, ax, fig):
        try:
            data = self.get_data(file2, file_num + 1)
        except Exception:
            data = read_hdf(file2.filename).data
        if ('operation' in self.general_dict.keys()):
            data = analysis(data, self.general_dict['operation'])
        axes = self.get_axes(file_num)
        axis1 = self.construct_axis(file1, axes[0], file_num)
        axis2 = self.construct_axis(file1, axes[1], file_num)
        if ('operation' in self.general_dict.keys()):
            axis2 = reflect(axis2, self.general_dict['operation'])
        grid_bounds = [axis1[0], axis1[-1], axis2[0], axis2[-1]]

        levels = np.linspace(1.5,
                             np.max(data) - 0.5,
                             int(np.max(data) + .001) - 1)

        # integer replication factors so np.kron can upsample the data onto the axis grid
        rep = np.array(
            [len(axis1) // data.shape[1],
             len(axis2) // data.shape[0]])

        imAx = ax.contour(np.kron(data, np.ones((rep[1], rep[0]))),
                          levels=levels,
                          linewidths=0.5,
                          colors=self.get_colormap(file_num + 1),
                          extent=grid_bounds)
Example #9
def periodically_classify(calibration, filename='data.csv'):
    while True:
        data = read_delete_when_available(filename)
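        # the two data columns (assumed to be the two signal channels) are analyzed as a pair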
        result = analysis.analysis([data[:, 0]], [data[:, 1]])
        calibration['new'] = [[result[0][0]], [result[1][0]]]
        prediction = analysis.KNN.predict_proba([[result[0][0], result[1][0]]])
        label_classification(calibration, prediction)
Example #10
 def test_analysis(self):
     analysis_runner = analysis()
     # analysis_runner.crawl(u"Frankfurt am Main")
     # analysis_runner.save_to_pickle()
     analysis_runner.load_from_pickle()
     self.assertEqual(len(analysis_runner.venues), 50)
     self.assertGreater(len(analysis_runner.tips), 500)
Example #11
 def test_assert_equal_x64_simple_para_clang_O0(self):
     evaluateResult(
         analysis(
             C_PATH + 'x86_64/simple_c_para_type_test-clang-m64-O0_ex.cfg',
             arch64),
         program_arity(C_PATH +
                       'x86_64/simple_c_para_type_test-clang-m64-O0.o'))
Example #12
def cmd(opt):

    ## Initialize variables
    Filenames = []
    fn = []
    l = 1
    a = {"sd": "00000000", "ed": "99999999", "ex": "export.csv", "l": 1}

    ## Assign each command-line option (key=value) to the options dict
    for o in opt[1:]:
        s = o.split("=")
        a[s[0]] = s[1]

    ## Handle the case where all files are selected
    if a["f"] == "all":
        Filenames = os.listdir("targets")
        Filenames.remove("tmp")

    # Split the comma-separated file names into a list
    else:
        Filenames = a["f"].split(",")

    try:
        l = int(a["l"])
    except ValueError:
        print("lの値は0~4で指定してください。")

    if not a["sd"].isdecimal() and a["ed"].isdecimal():
        print("sd,edはyyyymmddの形式、半角英数字で入力してください。初期設定に戻します")
        a["sd"] = "00000000"
        a["ed"] = "99999999"

    fn = Filenames

    try:
        if l != 0:
            Filenames = divider.divider(Filenames, l)

        analysis.analysis(Filenames, a["sd"], a["ed"], a["ex"], fn)

        if l != 0:
            for f in Filenames:
                os.remove("targets/" + f)

    except FileNotFoundError:
        print("対象ファイルが存在しません。正しく exports/ 内に存在しているか確認してください。")
Example #13
def index():
    if request.method == 'POST':
        address = request.form['address']
        result = analysis(address)
        if isinstance(result, str):
            return render_template('index.html', message=result)
        return redirect(url_for('score', address=address))
    return render_template('index.html', message='')
Example #14
def run(num_sets, num_tasks, utilization, percent, soft_contrib, duration, cpu_count):
    # Create taskset
    tset = create(num_sets, num_tasks, utilization)
    cbs_res = None
    edf_res = None
    for tasks in tset:
        config = default_config(duration, cpu_count)
        add(config, tasks, percent, soft_contrib)

        config_cbs(config)
        cbs_res = analysis.merge(analysis.analysis(run_model(config)), cbs_res)

        config_edf(config)
        edf_res = analysis.merge(analysis.analysis(run_model(config)), edf_res)


    return analysis.division(cbs_res, len(tset)), analysis.division(edf_res, len(tset))
Example #15
def index():
    if request.method == 'POST':
        address = request.form['address']
        result = analysis(address)
        if isinstance(result, str):
            return render_template('index.html', message=result)
        return redirect(url_for('score', address=address))
    return render_template('index.html', message='')
Example #16
def main():
    print "Place that you want to search : "
    # replace whitespace with '+'
    search_key = raw_input().replace(" ", "+")
    # Find the place_id and place_information from the search_key
    place_id, place_information = get_place_id(search_key)
    print "searching person who checked-in the place"
    # send place_id
    users = get_images(place_id)
    system('clear')
    print "3 seconds later, usernames will be write to screen... "
    sleep(3)
    for i in users:
        print usernames[i]
    raw_input('Please enter a key to continue')
    system('clear')
    print "Successfully,users saved and analysis is starting..."
    analysis(place_information, users)
Example #17
    def plot_grid(self, file, file_num, ax, fig):
        try:
            data = self.get_data(file, file_num)
        except Exception:
            data = read_hdf(file.filename).data
        if ('operation' in list(self.general_dict.keys())):
            data = analysis(data, self.general_dict['operation'])
        axes = self.get_axes(file_num)
        axis1 = self.construct_axis(file, axes[0], file_num)
        axis2 = self.construct_axis(file, axes[1], file_num)
        grid_bounds = [axis1[0], axis1[-1], axis2[0], axis2[-1]]

        maximum, minimum = self.get_min_max(file_num)

        if (self.is_log_plot(file_num)):
            if (maximum == 0):
                new_max = 0
            else:
                new_max = maximum / np.abs(maximum) * 10**(
                    int(np.log10(np.abs(maximum))) + 1)
            if (minimum == 0):
                new_min = 0
            else:
                new_min = minimum / np.abs(minimum) * 10**(
                    int(np.log10(np.abs(minimum))) + 1)

            threshold = self.general_dict['log_threshold'][file_num]
            imAx = ax.imshow(data, aspect='auto', origin='lower', \
                             interpolation='bilinear', vmin=new_min, vmax=new_max, \
                             norm=matplotlib.colors.SymLogNorm(threshold), extent=grid_bounds,
                             cmap=self.get_colormap(file_num))
        else:
            imAx = ax.imshow(data, aspect='auto', origin='lower', \
                             interpolation='bilinear', vmin=minimum, vmax=maximum, extent=grid_bounds,
                             cmap=self.get_colormap(file_num))

        if ('x1_zoom' in list(self.general_dict.keys())):
            plt.xlim(self.general_dict['x1_zoom'])
        if ('x2_zoom' in list(self.general_dict.keys())):
            plt.ylim(self.general_dict['x2_zoom'])
        indices = self.get_indices(file_num)
        selectors = indices[1:-1]
        if (indices[0].lower() == 'raw'):
            long_name = self.get_name(file, 'q') + self.append_legend(
                file_num) + r'$\/$' + self.get_units(file, 'q')
        else:
            long_name = self.get_name(file) + self.append_legend(
                file_num) + r'$\/$' + self.get_units(file)

        if (self.is_log_plot(file_num)):
            self.add_colorbar(imAx, long_name,
                              self.mod_tickers(minimum, maximum, threshold),
                              ax, fig)
        else:
            self.add_colorbar(imAx, long_name, None, ax, fig)

        self.set_labels(ax, file, axes, file_num)
Example #18
def farmerinfo():
    username = request.form['username']
    print(username)
    result = farmer_info(username)
    result[0][4] = result[0][4].title()
    print(result)
    stat1, stat2, stat3, stat41, stat42 = analysis(result)
    print(stat1, stat2, stat3, stat41, stat42)
    return render_template('farmerinfo.html', result=result, stat1=stat1, stat2=stat2,
                           stat3=stat3, stat41=stat41, stat42=stat42)
Example #19
	def __init__(this):
		this.dbconnection = dbconnection.dbconnection()
		this.analysis = analysis.analysis()
		print "Content-type: text/html\n"
		try:
			cookies = Cookie.SimpleCookie(os.environ["HTTP_COOKIE"])
			userid = cookies['userid'].value
			print this.update(userid)
		except Exception, e:
			print "error getdata: %s" %(e)
Example #20
def main():
    for i in range(300, 321):
        for j in range(20, 26):
            file_name = 'movie_2_nucleus_working_raw_t' + str(i) + "_z0" + str(j)
            print(file_name)
            temp = pd.DataFrame(analysis(file_name + '.png'))
            temp.to_csv(cwd + export_path + file_name + "_3",
                        index=False,
                        header=True)
Example #21
def trading():
    # not a secure input method; validating with request.form['ticker'].isalpha()
    # would reject international tickers (TIC.EX syntax)
    if request.method == 'POST':
        if request.form.get('quantum'):
            import quantum
            return 'qc pred' # str(quantum.analysis(request.form['ticker']))
        else:
            import analysis
            return str(analysis.analysis(request.form['ticker'])) # placeholder
    else:
        return 'Not a ticker!'
Example #22
    def __init__(self):
        super().__init__()

        self.an = analysis()
        self.radio = []
        self.rank = []
        self.c = Communicate()
        self.c.bar_on.connect(self.bar_unlimit_on)
        self.c.bar_off.connect(self.bar_unlimit_off)

        self.initUI()
Example #23
    def analyze(self):
        a = analysis(self.symbol)
        clean = a.clean(a.df)
        if len(a.df) > 0:
            self.earnings_est = clean(0)
            self.revenue = clean(1)
            self.earnings_history = clean(2)
            self.eps_trend = clean(3)
            self.eps_revisions = clean(4)
            self.growth_estimates = clean(5)
        else:
            self.earnings_est = np.nan
            self.revenue = np.nan
            self.earnings_history = np.nan
            self.eps_trend = np.nan
            self.eps_revisions = np.nan
            self.growth_estimates = np.nan

        self.a_list.append(self.symbol)
        self.attributes.append(a.attributes)
Example #24
 def __init__(self):
     self.dbc = DBcontroller()
     self.mecab = analysis()
     # self.mecab.getnouns(comment)
     self.product_name = [
         "구글홈", "아이폰 XS", "갤럭시 S9", "엘지 G7", "엘지 그램 15 2018",
         "삼성 노트북 9 always", "갤럭시탭 S4", "아이패드 6세대", "아이패드 프로 3세대"
     ]
     self.product_name_DB_version = [
         "go_s_home", "ap_p_ipxs", "ss_p_s9", "lg_p_g7", "lg_n_gram15",
         "ss_n_alwy9", "ss_t_galtap4", "ap_t_ipd6", "ap_t_pro3"
     ]
Example #25
def analyse():
    entity = request.args.get('entity')
    source = request.args.get('source')
    analysis_by_year, magnitude_by_year = analysis.analysis(entity, source)
    return {
        'sentiment': {
            source: analysis_by_year
        },
        'magnitude': {
            source: magnitude_by_year
        }
    }
Example #26
def upload_file():
    cleanup()
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # Do analysis
            analysis("." + url_for('uploaded_file', filename=filename))
            output_filename = "output.xlsx"
            return redirect(url_for('uploaded_file', filename=output_filename))
    return render_template("index.html")
Example #27
def index():

    if request.method != 'POST':
        max_val = 100
        min_val = 0
        future_val = 100
        return render_template('index.html',
                               max_val=max_val,
                               min_val=min_val,
                               future_val=future_val)

    err_msg = ''
    max_val = request.form['max']
    min_val = request.form['min']
    future_val = request.form['future']
    file = request.files['file']

    if not max_val.isdecimal():
        err_msg += 'The maximum value is not set.'
        flash("The maximum value is not set.", 'danger')

    if not min_val.isdecimal():
        err_msg += 'The minimum value is not set.'
        flash("The minimum value is not set.", 'danger')

    if not future_val.isdecimal():
        err_msg += 'The number of forecast days is not set.'
        flash("The number of forecast days is not set.", 'danger')

    if file.filename == '':
        err_msg += 'No file was provided.'
        flash("No file was provided.", 'danger')

    if err_msg != '':
        return render_template('index.html',
                               max_val=max_val,
                               min_val=min_val,
                               future_val=future_val)

    # Sanitize the uploaded filename
    basename = secure_filename(file.filename)

    # Full path
    filename = os.path.join(app.config['TEMP_DIR'], basename)

    file.save(filename)

    # Get the predictions
    header, datas = analysis(filename, app.config['IMAGES_DIR'], int(max_val),
                             int(min_val), int(future_val))

    return render_template('chart.html', header=header, datas=datas)
Example #28
def main():
    args = parser.parse_args()
    modify_arguments(args)

    # Resetting the graph and setting seeds
    tf.reset_default_graph()
    tf.set_random_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    with open(args.config_file, 'r') as stream:
        config = yaml.safe_load(stream)
        args.config = Munch(modify_config(args, config))

    logger.info(args)

    if args.mode == 'train':
        train(args)
    elif args.mode == 'test':
        test(args)
    elif args.mode == 'analysis':
        analysis(args)
Example #29
def get_bounds(self, file_name, num, file_num):
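    # zero-pad the dump number to six digits, e.g. num=42 -> '000042.h5'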
    file_name = file_name + str(1000000 + num)[1:] + '.h5'
    file = None
    try:
        file = h5py.File(file_name, 'r')
        data = self.get_data(file, file_num)
    except Exception:
        data = read_hdf(file_name).data
    if ('operation' in self.general_dict.keys()):
        data = analysis(data, self.general_dict.get('operation'))
    minimum, maximum = np.min(data), np.max(data)
    if file is not None:
        file.close()
    del data
    return minimum, maximum
Example #30
def test_main_csv():
    testing_input_csv_path = Path.cwd() / 'testing' / 'testing_data.csv'
    testing_output_csv_path = Path.cwd() / 'testing' / 'testing_data_output.csv'
    testing_output_template_csv_path = Path.cwd() / 'testing' / 'testing_data_complete.csv'
    analysis(testing_input_csv_path, testing_output_csv_path)
    fields = ['image1', 'image2', 'similar']
    analysis_output_df = pd.read_csv(testing_output_csv_path,
                                     names=fields,
                                     usecols=fields,
                                     header=0)
    analysis_template_df = pd.read_csv(testing_output_template_csv_path,
                                       names=fields,
                                       usecols=fields,
                                       header=0)

    # Compare the analysis output against the expected template
    assert analysis_output_df.equals(
        analysis_template_df
    ), "Output csv: <" + str(
        testing_output_csv_path) + "> does not match template csv: <" + str(
            testing_output_template_csv_path) + ">"
Example #31
def main():

    # the argument is the link to the application
    # example: https://play.google.com/store/apps/details?id=com.github.android
    try:
        arg = sys.argv[1]
        URL = arg + URL_SUFIX
    except IndexError:
        sys.exit('NO ARGUMENT')

    # information extracted from the app, plus its comments
    app_info = next(get_app_info(URL))
    comments = list(get_comments(URL))

    # analyze the comments
    analysis(app_info, comments)

    # insert into the database
    # noinspection PyUnresolvedReferences
    try:
        create(app_info, comments)
    except pymongo.errors.DuplicateKeyError:
        sys.exit('THAT APPLICATION ALREADY EXISTS IN THE DB')
Example #32
def computeNow():
    # startOfCurrentDay = datetime.datetime.now().replace(hour=8, minute=0, second=0, microsecond=0)

    # currentTimePlus = startOfCurrentDay
    currentTimePlus = datetime.datetime.now()

    data = ct.classifyTime(currentTimePlus, 'randomTimePrice.csv', 'compute')
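    # analysis() is assumed to return two linear coefficients (a, b) plus an intercept for the predictor below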
    a, b, bias = analysis.analysis()
    predict = data["PreviousMean"].values[0] * a + data["WeekDayMean"].values[
        0] * b + bias
    print("PreviousMean value: {}".format(data["PreviousMean"].values[0]))
    print("WeekDayMean value: {}".format(data["WeekDayMean"].values[0]))
    print("predict value: {}".format(predict))
    print("Actual value: {}".format(data["Actual"].values[0]))
    return predict
Example #33
 def test_tips_tagging(self):
     analysis_runner = analysis()
     tagger_de = POSTagger(self.path_to_treetagger, self.path_to_parameter_file_de)
     
     analysis_runner.load_from_pickle()
     result = []
     i = 0
     
     for tip in analysis_runner.tips:
         tip.pos = tagger_de.tag(tip.text)
         result.append(tip)
         print tip.text, tip.pos
         i += 1
         if i == 10:
             break
Example #34
def foreach_decompose(file_num):
    e_filename = e1[file_num]
    e1file = read_hdf(e_filename)
    e2file = read_hdf(e2[file_num])
    e1data = analysis.analysis(e1file.data, ['fft'])
    e2data = analysis.analysis(e2file.data, ['fft'])
    h5_output.run_attributes['TIME'][0] = e1file.run_attributes['TIME'][0]
    h5_output.run_attributes['ITER'][0] = e1file.run_attributes['ITER'][0]
    if blockx or blocky:
        e1data = arraymask.mask(e1data, axes=[ky, kx], region=brg)
        e2data = arraymask.mask(e2data, axes=[ky, kx], region=brg)

    # k · E in Fourier space; used below to project out the longitudinal field component
    dive = np.multiply(kxvec, e1data) + np.multiply(kyvec, e2data)

    h5_output.data = np.multiply(kxvec, dive)
    # transverse part, longitudinal part
    if ift:
        tr1, e1data = (analysis.analysis(e1data - h5_output.data,
                                         ['ifft ' + ift]),
                       analysis.analysis(h5_output.data, ['ifft ' + ift]))
    else:
        tr1, e1data = e1data - h5_output.data, h5_output.data
    if 't1' in ocomp:
        h5_output.data = np.abs(tr1)
        h5_output.run_attributes['NAME'][0], h5_output.data_attributes[
            'LONG_NAME'] = 'Et1', 'E_{t1}'
        newname = outdir + 'Et1' + os.path.basename(e_filename)[2:]
        write_hdf(h5_output, newname)

    h5_output.data = np.multiply(kyvec, dive)
    if ift:
        tr2, e2data = (analysis.analysis(e2data - h5_output.data,
                                         ['ifft ' + ift]),
                       analysis.analysis(h5_output.data, ['ifft ' + ift]))
    else:
        tr2, e2data = e2data - h5_output.data, h5_output.data
    if 't2' in ocomp:
        h5_output.data = np.abs(tr2)
        h5_output.run_attributes['NAME'][0], h5_output.data_attributes[
            'LONG_NAME'] = 'Et2', 'E_{t2}'
        newname = outdir + 'Et2' + os.path.basename(e_filename)[2:]
        write_hdf(h5_output, newname)
    if 't' in ocomp:
        h5_output.data = np.sqrt(np.abs(np.square(tr1) + np.square(tr2)))
        h5_output.run_attributes['NAME'][0], h5_output.data_attributes[
            'LONG_NAME'] = 'ET', 'E_T'
        newname = outdir + 'ET' + os.path.basename(e_filename)[2:]
        write_hdf(h5_output, newname)
    if 'l' in ocomp:
        h5_output.data = np.sqrt(np.abs(np.square(e1data) + np.square(e2data)))
        h5_output.run_attributes['NAME'][0], h5_output.data_attributes[
            'LONG_NAME'] = 'EL', 'E_L'
        newname = outdir + 'EL' + os.path.basename(e_filename)[2:]
        write_hdf(h5_output, newname)
    return e_filename
Example #35
    def plot_lineout(self, file, file_num, ax, fig):
        try:
            data = self.get_data(file, file_num)
        except Exception:
            data = read_hdf(file.filename).data
        if ('operation' in list(self.general_dict.keys())):
            data = analysis(data, self.general_dict['operation'])
        axes = self.get_axes(file_num)
        xx = self.construct_axis(file, axes[0], file_num)
        maximum, minimum = self.get_min_max(file_num)
        indices = self.get_indices(file_num)
        selectors = indices[1:-1]
        if (indices[0].lower() == 'raw'):
            label = self.get_name(file, 'q') + self.append_legend(file_num)
        else:
            label = self.get_name(file) + self.append_legend(file_num)

        if (self.is_log_plot(file_num)):
            ax.plot(xx,
                    data,
                    self.get_marker(file_num),
                    label=label,
                    linewidth=self.get_linewidth())
            side = self.general_dict['side'][file_num]
            if (side == 'left'):
                ind = 0
            else:
                ind = 1
            threshold = self.general_dict['log_threshold'][ind]
            plt.yscale('symlog', linthreshy=threshold)
            ax.set_ylim([minimum, maximum])

        else:
            ax.plot(xx,
                    data,
                    self.get_marker(file_num),
                    label=label,
                    linewidth=self.get_linewidth())
            ax.set_ylim([minimum, maximum])
            ax.minorticks_on()
            plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))

        self.set_labels(ax, file, axes, file_num)
Example #36
def calibrate(filename, measurements=20, sep=1):
    calibrate_results = {}
    for label in LABELS:
        calibrate_results[label] = [[], []]

    for j in range(sep):
        for label in LABELS:
            print('Please think {} for {} seconds'.format(label, measurements))
            time.sleep(3)

            for i in range(measurements):
                data = read_delete_when_available(filename)
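                # each sample carries two channels, analyzed into one (x, y) calibration point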
                c1 = data[:, 0]
                c2 = data[:, 1]
                result = analysis.analysis([c1], [c2])
                calibrate_results[label][0].append(result[0][0])
                calibrate_results[label][1].append(result[1][0])

    return calibrate_results
Example #37
def main(argv):
    global API_URL
    API_URL = argv[1]
    
    print "Server: " + API_URL
        
    while True:
        try:
            task = quest_task()
            if task is None:
                print "no more task now, waiting for 20 seconds......"
                time.sleep(20)
                continue
            
            url = task['url']
            print "new task " + url
            
            def progress(val, url):
                print "%d%% checked - %s" % (val, url)
                
            def meta(metadata):
                print metadata['title']
                print "task_id = %d, reporting metadata to server......" % (task['_id'], )
                print "ok." if 0 == report_meta(task['_id'], metadata) else "failed."
            
            totalpart, match_set, details = analysis.analysis(url, progress_callback = progress, meta_callback = meta)
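            # result: the percentage of checked parts that matched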
            result = 100 * len(match_set) / totalpart
            print "completed. result = %d%%" % (result, )
            print "reporting to server......"
            report_ret = report_result(task['_id'], result, json.dumps(details))
            if 0 == report_ret:
                print "ok."
            else:
                print "failed - %d" % (report_ret, )
            
        except Exception, e:
            print e
            print "waiting for 30 seconds......"
            time.sleep(30)
            continue
Example #38
def score(address):
    score_data = analysis(address)
    yelp_list = sorted(score_data['yelp_results'].values(), key=lambda k: k['distance'])
    yelp_markers = [[restaurant['name'], restaurant['lat'], restaurant['lon']] for restaurant in yelp_list]
    num_places = len(yelp_list)
    sum_reviews = sum([restaurant['review_count'] for restaurant in yelp_list])
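    # nth() is assumed to turn a percentile into an ordinal string, e.g. '87' -> '87th'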
    violent_rank = nth(str(int(score_data['violent_rank'] * 100)))
    property_rank = nth(str(int(score_data['property_rank'] * 100)))
    return render_template('score.html',
                           number=score_data['address']['street_number'],
                           street=score_data['address']['route'],
                           town=score_data['address']['locality'],
                           state=score_data['address']['state'],
                           zip=score_data['address']['postal_code'],
                           lum=score_data['lum'],
                           rep_agency=score_data['police_name'],
                           rep_agency_dist=score_data['pol_distance'],
                           pop_served=score_data['pop_served'],
                           violent_crime_pc=score_data['violent_crime_pc'],
                           violent_rank=violent_rank,
                           property_crime_pc=score_data['property_crime_pc'],
                           property_rank=property_rank,
                           restaurants=yelp_list,
                           address_lat=score_data['address']['lat'],
                           address_lng=score_data['address']['lng'],
                           yelp_markers=json.dumps(yelp_markers),
                           num_places=num_places,
                           sum_reviews=sum_reviews,
                           avg_yelp_rating=score_data['avg_yelp_rating'],
                           num_walkable=score_data['num_walkable'],
                           avg_walk=score_data['avg_walk'],
                           avg_good_dr=score_data['avg_good_dr'],
                           avg_bad_dr=score_data['avg_bad_dr'],
                           walk_grade=score_data['walk_grade'],
                           drive_grade=score_data['drive_grade'],
                           rest_grade=score_data['rest_grade'],
                           walk_img=score_data['walk_img'],
                           gd_img=score_data['gd_img'],
                           bd_img=score_data['bd_img'])
Example #39
# -*- coding: cp1251 -*-
#
#	ls-dyna nodout file reader
#
import reader
import analysis

analysis.analysis().run()

"""
r=reader.lsdyna_reader ()
r.read_nodout ('nodout')
r.nodeout_print_test()
"""
Example #40
	def test_assert_equal_x64_simple_para_clang_O0(self):
		evaluateResult(analysis(C_PATH + 'x86_64/simple_c_para_type_test-clang-m64-O0_ex.cfg', arch64),
					   program_arity(C_PATH + 'x86_64/simple_c_para_type_test-clang-m64-O0.o'))
Example #41
	def test_assert_equal_x64_simple_gcc_O0(self):
		evaluateResult(analysis(C_PATH + 'x86_64/simple_c_func_test-gcc-m64-O0_ex.cfg', arch64),
					   program_arity(C_PATH + 'x86_64/simple_c_func_test-gcc-m64-O0.o'))
Example #42
	def test_assert_equal_x86_indirect_call_test_2_clang_O0(self):
		evaluateResult(analysis(C_PATH + 'x86/indirect_call_test_2-clang-m32-O0_ex.cfg', arch86),
					   program_arity(C_PATH + 'x86/indirect_call_test_2-clang-m32-O0.o'))
Example #43
	def test_assert_equal_x86_simple_clang_O0(self):
		evaluateResult(analysis(C_PATH + 'x86/simple_c_func_test-clang-m32-O0_ex.cfg', arch86),
					   program_arity(C_PATH + 'x86/simple_c_func_test-clang-m32-O0.o'))
Example #44
#coding=utf-8
import login
import Search
import json
import analysis
if __name__ == '__main__':
	client = login.APILogin()
	login.browserLogin()
	print "========================="
	print "1.Run the Search Model"
	print "2.Run the Analysis Model"
	choice = int(raw_input("Your choice:"))
	if choice == 1:
		Search.search(client)
	elif choice == 2:
		analysis.analysis(client)
	else:
		print "Invalid choice!"
	
Example #45
from sklearn.ensemble import AdaBoostClassifier

# project-local modules assumed by the code below
import parsing
import analysis

if __name__ == '__main__':
    # load raw txt data
    parser = parsing.parsing()
    extracted_features_path = './Data/Cornell/features/office_data_nodefeats.txt'
    raw_data = parser.parse_txt_data(extracted_features_path)
    # load ply data, here just test for office scene 1
    pic_path_set = ['./Data/Cornell/office_data_ply/scene5.ply']
    for pic_path in pic_path_set:
        office_data = parser.parse_ply_data(pic_path)
        print(office_data)


    # create analysis utils
    analyser = analysis.analysis()

#=========================================================
# K-NN
#=========================================================
    # classify
#    kSet = [3]
#    experiment_number = 1
#
#    for k in kSet:
#        print('This is for k = ', k, '\n')
#        #k_classifier = nn.nearestneighbors(k)
#
#        for i in range(0, experiment_number):
#            k_classifier = nn.nearestneighbors(k)
#            np.random.shuffle(raw_data)
Example #46
def experiment():

    if request.method == 'POST':

        # get the current value of i
        i = session['i']
        j = session['j']
        # for debugging:
        #print "i: ", i, " j: ", j

        # end experiment and show results if aborted

        if request.form.get('end') or j >= shuffle.blocks: #<<<
            return analysis.analysis()
        if request.form.get('quit'):
            #print "experiment stopped. exit to DOS..."
            return analysis.analysis()
        if request.form.get('abort'):
            #print "experiment aborted. exit to DOS..."
            return analysis.analysis()
        if request.form.get('resume'):
            #print "here we go!"

            # a new array is generated (not nice: the old one is overwritten)
            shuffle.save_array(shuffle.makeShuffle())

            session['tracker'] = []
            session['timing'] = []

            # count a new block
            session['j'] += 1
            # reset i
            session['i'] = 0

            ###
            i = session['i']
            j = session['j']
            #print "this is the resume status:  i: ", i, " j: ", j


            # end experiment and show results if aborted
            j = session['j']
            if j >= shuffle.blocks: #<<<
                return analysis.analysis()

        # get the array generated for this participant
        array_name = files.get_array()
        this_array = files.load_this(array_name)

        # timing logging
        session['timing'].append(time.time())
        real_time = str( datetime.datetime.now() )
        real_time = real_time[:real_time.find('.')]


        # if the experiment has not started yet (i=-1), we set it up
        if i < 0:
            # count up -- this will make i == 0 and the next trial will be a real trial
            session['i'] += 1

            # empty the tracker for the new participant

            session['tracker'] = []
            session['timing'] = []

            #print "--- ES GEHT LOS! ---" # for debugging
            # show the starting screen
            return render_template('experiment.html',
                                   i=i,
                                   length=shuffle.trials_per_block)



        # if the experiment has started
        if i >= 0 and i < shuffle.trials_per_block:

            # get the data for the next trial (which will be rendered subsequently)
            express = this_array['expressions'][i]
            ident = this_array['identities'][i]
            mask = this_array['masks'][i]
            # since we will be popping from the mask list in js, we invert it
            mask = mask[::-1]

            # get the stimulus for the next trial
            img = stimuli.stim[ident][express]

            # the array tracking the experiment progression gets a new entry
            # which is a dict containing all properties
            session['tracker'].append(arrays.make_entry())

            # the data of the next trial are appended to the end of the tracker;
            # they can be accessed later
            session['tracker'][-1]['express'] = express
            session['tracker'][-1]['ident'] = ident
            session['tracker'][-1]['filenames'] = img
            session['tracker'][-1]['time'] = real_time

        # as long as the counter does not exceed the number of trials,
        # run the experiment !
        if i >= 1 and i < shuffle.trials_per_block:
            logwrite.track_response(-2)

            # prevent the session tracker from becoming too large (cookies can't exceed 4 KB)
            if len(session['tracker']) > 3:
                del session['tracker'][0]

        # if the run is over
        if i == shuffle.trials_per_block:
            logwrite.track_response(-1)

            # make a plot
            pltname = feedback.plotThis(feedback.feedbackDir,feedback.feedbackFile)

            # the page gets returned, but without stimuli
            return render_template('experiment.html',
                                   i=shuffle.trials_per_block,
                                   j=j,
                                   blocks=(shuffle.blocks - 1),
                                   length=shuffle.trials_per_block,
                                   pltname='static/feedback/' + pltname)


        # we increase the counter by one
        session['i'] += 1

        # the page with stimuli and everything gets returned
        # if the experiment is ongoing (i>0 and <length)
        return render_template('experiment.html',
                               # trial tracking
                               i=i,
                               length=shuffle.trials_per_block,
                               # presentation properties
                               squareSize=stimuli.squareSize,
                               waitTime=stimuli.waitTime,
                               numRow=stimuli.numRow,
                               numCol=stimuli.numCol,
                               isi=stimuli.isi,
                               # trial properties
                               express=express,
                               mask=mask,
                               img=img)