Example #1
def plotring(image, position_matrix, angles, r, max, step, m):
    im = image[position_matrix == r]
    imang = angles[position_matrix == r]
    imang, im = zip(*sorted(zip(imang, im)))
    mean = Calculate.statistics1d(image, position_matrix=position_matrix,
                                  max=max, step=step, statistic='mean')
    med = Calculate.statistics1d(image, position_matrix=position_matrix,
                                 max=max, step=step, statistic='median')
    std = Calculate.statistics1d(image, position_matrix=position_matrix,
                                 max=max, step=step, statistic=np.std)
    # mean = np.mean(im)
    # med = np.median(im)
    # std = np.std(im)
    print 'std: ', std
    print 'std/mean%: ', std / mean * 100
    print 'abs(mean-median)/mean%: ', np.abs(mean - med) / mean * 100
    thresh = m * std
    size = np.shape(im)[0]
    print 'size: ', size
    meana = np.ones(size) * mean
    meda = np.ones(size) * med
    upperT = np.ones(size) * (mean + thresh)
    lowerT = np.ones(size) * (mean - thresh)
    t = np.arange(0, 2 * np.pi, 2 * np.pi / size)
    plt.ioff()
    plt.plot(t, im, 'r', t, meda, 'b', t, meana, 'g', t, upperT, 'o', t,
             lowerT, 'o')
    plt.show()
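For context, plotring expects image, position_matrix and angles to be 2-D arrays of the same shape, with position_matrix holding integer ring radii (Example #38 below builds them from a pyFAI geometry object). Here is a minimal, self-contained sketch of such inputs built with plain NumPy; every name and value in it is an illustrative assumption, not part of the original project.

import numpy as np

# Hypothetical stand-ins for the arrays plotring() consumes; in the real
# pipeline they come from a pyFAI geometry object (see Example #38).
ny, nx = 256, 256
yy, xx = np.indices((ny, nx))
cy, cx = ny / 2.0, nx / 2.0

# Integer radial bin of every pixel, analogous to the rounded rArray
position_matrix = np.around(np.hypot(yy - cy, xx - cx)).astype(int)
# Azimuthal angle of every pixel, analogous to chiArray
angles = np.arctan2(yy - cy, xx - cx)
# A synthetic "detector image": smooth rings plus a little noise
image = np.exp(-position_matrix / 40.0) + 0.01 * np.random.random((ny, nx))

# plotring(image, position_matrix, angles, r=30,
#          max=position_matrix.max(), step=1, m=3)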
Example #2
def coma():
    name = 'coma'

    if name not in Materials.aber:
        Materials.aber[name] = {}

    gauss = Calculate.first_para()[-1]['L']

    for k in Materials.K[name]:
        if str(k[0]) + '_' + str(k[1]) not in Materials.aber[name]:
            Materials.K2 = k[1]
            Materials.K1 = 0
            light = Calculate.meri_off()
            y = (light[-1]['L'] - gauss) * math.tan(
                math.radians(light[-1]['U']))

            Materials.K1 = -k[0]
            light = Calculate.meri_off()
            y_b = (light[-1]['L'] - gauss) * math.tan(
                math.radians(light[-1]['U']))

            Materials.K1 = k[0]
            light = Calculate.meri_off()
            y_a = (light[-1]['L'] - gauss) * math.tan(
                math.radians(light[-1]['U']))

            Materials.aber[name][str(k[0]) + '_' +
                                 str(k[1])] = y - (y_a + y_b) / 2
Example #3
def Draw(para, filename, draw_type):
    data = Calculate.Alogrithim(para[0], para[1], para[2], para[3], para[4], para[5], para[6], para[7])
    fig = plt.figure(figsize = (6,6))
    ax = fig.add_subplot(111, projection = "3d")
    
    #Determine the edges of plots
    edges = []
    for i in range(3):
        ax_min = np.min(data[i])
        ax_max = np.max(data[i])
        gap = (ax_max - ax_min) / 20
        edges.append([ax_min - gap, ax_max + gap])
    
    #Label some important points
    ax.scatter(data[0,0], data[1,0], data[2,0], lw = 0.8, alpha = 0.5, color = 'b', label = "Start Point")
    ax.scatter(data[0,-1], data[1,-1], data[2,-1], lw = 0.8, alpha = 0.5, color = 'r', label = "Last Point")
    ax.scatter(0, 0, 0, lw = 0.8, color = 'black', alpha = 0.5, label = "Origin")
    
    #for R < 1, the only fixed point is the origin
    if para[4] > 1:
        p = Calculate.fix_point(para[4], para[5], para[6])
        ax.scatter(p[:,0],p[:,1],p[:,2], color = "brown", alpha = 0.5, lw = 0.8, label = "Fixed Points")

    ax.legend(loc = "best")
    # Setting the axes properties
    ax.set_xlim3d(edges[0])
    ax.set_xlabel('X')

    ax.set_ylim3d(edges[1])
    ax.set_ylabel('Y')

    ax.set_zlim3d(edges[2])
    ax.set_zlabel('Z')
    ax.legend(loc = "upper left")

    if draw_type == "anima":
        line, = ax.plot(data[0,0:2], data[1,0:2], data[2,0:2], lw = 0.6, color = 'g')
        time = ax.text(edges[0][0], edges[1][1], edges[2][1], "T = 0")

        # Creating the Animation object
        line_ani = animation.FuncAnimation(fig, update_lines_time, frames = 1000, fargs=(data, line, time, para[3]), blit=False)
        
        #Store the video
        ffmpegpath = os.path.abspath("../../ffmpeg/ffmpeg-20200422-2e38c63-win64-static/ffmpeg-20200422-2e38c63-win64-static/bin/ffmpeg.exe")
        matplotlib.rcParams["animation.ffmpeg_path"] = ffmpegpath
        mywriter = animation.FFMpegWriter(fps = 100)
        line_ani.save(filename, writer=mywriter)
    
    elif draw_type == "static":
        #Draw the static final plot
        ax.plot(data[0], data[1], data[2], lw = 0.6, color = 'g')
        ax.text(edges[0][0], edges[1][1], edges[2][1], "[R,P,B] = [{0},{1},{2:.2f}]\n[X,Y,Z] = [{3},{4},{5}]".format(para[4],para[5],para[6],para[0],para[1],para[2]))
        plt.savefig(filename)

    else:
        return "Wrong Draw Type!"
Example #4
def spherical():
    name = 'spherical'
    if name + Materials.extend not in Materials.aber:
        Materials.aber[name + Materials.extend] = {}

    gauss = Calculate.first_para()[-1]['L']

    for k in Materials.K[name]:
        if str(k[0]) + '_' + str(k[1]) not in Materials.aber[name +
                                                             Materials.extend]:
            Materials.K1 = k[0]
            Materials.K2 = k[1]
            Materials.aber[name + Materials.extend][str(k[0]) + '_' + str(k[1])] = \
                Calculate.meri_on()[-1]['L'] - gauss
Example #5
def off_axis():
    lights = Calculate.meri_off()
    s = Materials.lens[0]['d'] / math.cos(math.radians(lights[0]['U']))
    t = s
    h = []

    for i in range(0, len(Materials.lens)):
        h.append(Materials.lens[i]['r'] *
                 math.sin(math.radians(lights[i]['U'] + lights[i]['I'])))

    for i in range(0, len(Materials.lens)):
        if i == 0:
            n = 1
        else:
            n = Materials.lens[i - 1]['n']

        spie = func_spie(s, n, Materials.lens[i]['n'], Materials.lens[i]['r'],
                         lights[i]['I'], lights[i]['Ipie'])
        tpie = func_tpie(t, n, Materials.lens[i]['n'], Materials.lens[i]['r'],
                         lights[i]['I'], lights[i]['Ipie'])

        if i == len(Materials.lens) - 1:
            s = spie
            t = tpie
        else:
            # transition formula
            light = lights[i]
            D = func_D(h[i], h[i + 1], light['L'], light['U'],
                       Materials.lens[i]['r'], n, Materials.lens[i]['n'])
            t = tpie - D
            s = spie - D

    return [s, t]
Example #6
def main():
    max_num = 10
    problem = int(input('Enter the number of problems: '))
    i = 1
    correct = 0
    wrong = 0
    print("This quiz has {} problems; the full score is 100".format(problem))
    while i < problem + 1:
        ari = Ari_Expression(max_num)
        inf = infix_to_suffix()
        real_res = Calculate.getResult(
            inf.str_to_fraction(inf.to_suffix_expression(ari.str)))
        if real_res >= 0:
            print("------------------------------")
            real_res = ari.str_num(real_res)
            print(str(i) + '. ' + ari.str, end='')
            res = input()
            if res == real_res:
                correct += 1
                print("Correct!")
            else:
                wrong += 1
                print("Wrong! The correct answer is: {}".format(real_res))
            i += 1
    print('\nScore: ' + str(correct / problem * 100))
Example #7
def Main():
    if (len(sys.argv) < 2):
        sys.exit("No input file path provided.")
    Path = sys.argv[1]
    ExportPath = None
    if (len(sys.argv) > 2):
        ExportPath = sys.argv[2]
        os.makedirs(ExportPath, exist_ok=True)
        if (not os.path.exists(ExportPath)):
            ExportPath = None

    JSONPath = os.path.splitext(Path)[0] + ".txt"
    if (not os.path.exists(Path)):
        sys.exit("File does not exist.")
    if (not os.path.exists(JSONPath)):
        sys.exit("JSON file does not exist.")

    JSON = ReadJSON(JSONPath, Path)
    JSON = IdentifyRunway.IdentifyRunway(JSON, ExportPath)
    Display.Display(JSON, ExportPath)
    Correct.Correct(JSON, ExportPath)
    JSON = Calculate.find_position(JSON)

    #Export JSON:
    if (ExportPath is not None):
        File = open(ExportPath + "/11 JSON.txt", "w")
        json.dump(JSON, File, indent=4)
        File.close()

    print(json.dumps(JSON, indent=4))
Example #8
    def _Calculate_depth_fired(self):
        # take the values from the calculated spectral density
        nu = np.array(self.nu)
        spectrum_y = np.array(self.spectrum_y)
        spectrum_yy = np.array(self.spectrum_yy)

        nu_calc = nu[np.logical_and(nu >= self.spec_min, nu <= self.spec_max)]
        spectrum_y_calc = []
        spectrum_yy_calc = []
        i = 0
        for i in range(0, len(nu)):
            if nu[i] in nu_calc:
                spectrum_y_calc.append(spectrum_y[i])
                if spectrum_yy != []:
                    spectrum_yy_calc.append(spectrum_yy[i])
        # performing lorentzian fits to the spectral data
        spectrum_y_calc = np.array(spectrum_y_calc)
        self.lorentz_fit_parameters_y = Fit.Fit(nu_calc, spectrum_y_calc,
                                                Fit.Lorentzian,
                                                Fit.LorentzianEstimator)
        self.density_plot_fit_y = Fit.Lorentzian(
            *self.lorentz_fit_parameters_y)(nu)
        if spectrum_yy != []:
            spectrum_yy_calc = np.array(spectrum_yy_calc)
            self.lorentz_fit_parameters_yy = Fit.Fit(nu_calc, spectrum_yy_calc,
                                                     Fit.Lorentzian,
                                                     Fit.LorentzianEstimator)
            self.density_plot_fit_yy = Fit.Lorentzian(
                *self.lorentz_fit_parameters_yy)(nu)
        # update the spectral density plots with the fits in it
        self._plot_density_changed(nu, spectrum_y, spectrum_yy)
        # calculate the area from the lorentz fit
        gamma_e = self.gamma_e
        rho = self.proton_density * 1e27  #to get 1/m^3
        Area1 = self.lorentz_fit_parameters_y[2]  #in MHz^2
        print Area1
        #Area1 = 0.0721
        B1 = abs(2 * Area1 / (gamma_e * gamma_e))**(0.5)  #in Tesla
        self.depth1 = Calculate.calculate_depth_simple(B1, rho)
        print B1

        if spectrum_yy != []:
            Area2 = self.lorentz_fit_parameters_yy[2]  #in MHz^2
            print Area2
            B2 = abs(2 * Area2 / (gamma_e * gamma_e))**(0.5)  #in Tesla
            self.depth2 = Calculate.calculate_depth_simple(B2, rho)
            print B2
Example #9
def distortion():
    name = 'distortion'
    if name not in Materials.aber:
        Materials.aber[name] = {}

    for k in Materials.K[name]:
        if str(k[0]) + '_' + str(k[1]) not in Materials.aber[name]:
            Materials.K1 = k[0]
            Materials.K2 = k[1]
            gauss = Calculate.first_para()[-1]['L']
            light = Calculate.meri_off()
            yp = -(light[-1]['L'] - gauss) * math.tan(
                math.radians(light[-1]['U']))
            y = Materials.obj['r'] * k[1]
            w = math.radians(Materials.obj['w']) * k[1]
            y0 = -Calculate.height(y, w)

            Materials.aber[name][str(k[0]) + '_' + str(k[1])] = yp - y0
Example #10
def format_cvm(Region_Id):
    response = Calculate.get_cvm(Region_Id)
    if response['TotalCount'] != 0:
        result = []
        for line in response['InstanceSet']:
            data = (line.get('InstanceId'), line.get('Placement')['Zone'],
                    line.get('InstanceName'), line.get('InstanceType'),
                    line.get('OsName'), line.get('PrivateIpAddresses')[0])
            result.append(data)
        return result
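format_cvm only relies on a handful of keys in the response returned by Calculate.get_cvm. Here is a hedged sketch of the minimal response shape it can parse; all field values below are invented for illustration, and the real response comes from the cloud API.

fake_response = {
    'TotalCount': 1,
    'InstanceSet': [{
        'InstanceId': 'ins-xxxxxxxx',
        'Placement': {'Zone': 'ap-guangzhou-3'},
        'InstanceName': 'demo',
        'InstanceType': 'S2.SMALL1',
        'OsName': 'CentOS 7.6 64bit',
        'PrivateIpAddresses': ['10.0.0.2'],
    }],
}

# Same unpacking as in format_cvm, applied to the fake response
rows = [(i.get('InstanceId'), i.get('Placement')['Zone'], i.get('InstanceName'),
         i.get('InstanceType'), i.get('OsName'), i.get('PrivateIpAddresses')[0])
        for i in fake_response['InstanceSet']]
print(rows)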
Example #11
def Covid19_Data_Update():
    Calculate.Forming_Covid19_SCV(Calculate.Download_Covid19_SCV())
    frame_L2_frame2[
        'text'] = text = "本日" + Calculate._covid19_data_calculate[0]['公表_年月日']
    f3_Label_02['text'] = text = Calculate._covid19_data_calculate[0][
        '陽性者数(累計)']
    f3_Label_04['text'] = text = Calculate._covid19_data_calculate[0]['入院中']
    f3_Label_06['text'] = text = Calculate._covid19_data_calculate[0]['軽症・中等症']
    f3_Label_08['text'] = text = Calculate._covid19_data_calculate[0]['重症']
    f3_Label_10['text'] = text = Calculate._covid19_data_calculate[0]['死亡']
    f3_Label_12['text'] = text = Calculate._covid19_data_calculate[0]['退院']

    frame_L2_frame1[
        'text'] = text = "昨日" + Calculate._covid19_data_calculate[1]['公表_年月日']
    f2_Label_02['text'] = text = Calculate._covid19_data_calculate[1][
        '陽性者数(累計)']
    f2_Label_04['text'] = text = Calculate._covid19_data_calculate[1]['入院中']
    f2_Label_06['text'] = text = Calculate._covid19_data_calculate[1]['軽症・中等症']
    f2_Label_08['text'] = text = Calculate._covid19_data_calculate[1]['重症']
    f2_Label_10['text'] = text = Calculate._covid19_data_calculate[1]['死亡']
    f2_Label_12['text'] = text = Calculate._covid19_data_calculate[1]['退院']
Example #12
File: main.py Project: wxyzwcf/4ze
def main():
    parser = OptionParser()
    parser.add_option("-n",
                      action="store",
                      type="int",
                      dest="problem",
                      default=10,
                      help='number of problems')
    parser.add_option(
        "-r",
        action="store",
        type='int',
        dest="max_num",
        default=10,
        help="value range of the operands",
    )
    i = 1
    (options, args) = parser.parse_args()
    correct = []
    wrong = []
    p = open('Exercises.txt', 'w')
    r = open('Answer.txt', 'w')
    while i < options.problem + 1:
        ari = Ari_Expression(options.max_num)
        inf = infix_to_suffix()
        try:
            real_res = Calculate.getResult(
                inf.str_to_fraction(inf.to_suffix_expression(ari.str)))
        except ValueError:
            continue
        if real_res >= 0:
            real_res = ari.str_num(real_res)
            print(str(i) + '. ' + ari.str, end='')
            p.write(str(i) + '. ' + ari.str + '\n')
            r.write(str(i) + '. ' + real_res + '\n')
            res = input()
            if res == real_res:
                correct.append(i)
            else:
                wrong.append(i)
            i += 1
    p.close()
    r.close()
    print('Accuracy: ' + str(100 * len(correct) / options.problem) + '%')
    g = open('Grade.txt', 'w')
    g.write('Correct:' + str(len(correct)) + '(')
    for x in correct:
        g.write(str(x) + ', ')
    g.write(')\n')
    g.write('Wrong:' + str(len(wrong)) + '(')
    for x in wrong:
        g.write(str(x) + ', ')
    g.write(')\n')
Example #13
def plotmask(maskwoutput=None, maxr=None, xray_image=None,
             position_matrix=None):
    """
    Plots the mask metrics allowing for evaluation of mask quality
    """

    # highper,lowper,totalper, mask, imask,StatsA=maskwoutput
    highper, lowper, totalper, mask, imask = maskwoutput
    # May need matplotlib help from Tom on this
    plt.ioff()
    plt.clf()
    # plot the graph of how the mask depends on radial distance
    # TODO: use Q instead of r in the final product?
    plt.plot(np.arange(0, maxr + 1, 1), highper[:], '-',
             np.arange(0, maxr + 1, 1), lowper[:], 'o',
             np.arange(0, maxr + 1, 1), totalper[:], '^')
    plt.show()
    # Plot the mask itself
    # plot the overlay of the mask and the image
    mask = plt.imshow(mask)
    plt.show()
    imagemask = plt.imshow(xray_image * imask)
    plt.show()
    t = np.arange(0, maxr, 1)
    plt.plot(t, Calculate.mean1d(xray_image,
                                 position_matrix=position_matrix * imask,
                                 step=1), 'b',
             # t, Median1D(xray_image, max=maxr, step=1, position_matrix=position_matrix*imask), 'r',\
             # t,Median1D(xray_image, max=maxr, step=1, position_matrix=position_matrix*imask)-Mean1D(xray_image, max=maxr, step=1, position_matrix=position_matrix*imask)
    )
    plt.show()

    oldstd = np.sqrt(
        Calculate.variance1d(xray_image, position_matrix=position_matrix,
                             max=maxr, step=1))
    newstd = np.sqrt(Calculate.variance1d(xray_image,
                                          position_matrix=position_matrix * imask,
                                          max=maxr, step=1))
    plt.plot(t, oldstd, 'b', t, newstd, 'r', t, oldstd - newstd, 'g')
    plt.show()
Example #14
def InvokeMultiFunction(f, values):
    l = []
    nf = f.copy()
    num = nf.pop(0)
    variables = []
    for i in range(num):
        variables.append(nf.pop(0))
    for i in nf:
        if i in variables:
            l.append(str(values[variables.index(i)]))
        else:
            l.append(i)
    return Calculate.RPNCalc(l)
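Reading the code above, f is a flat list: its first element is the number of variable names that follow, and the remaining tokens form an expression in reverse Polish notation; the loop substitutes values for the variable names before handing the token list to Calculate.RPNCalc. Here is a small self-contained illustration of that substitution, with a toy evaluator standing in for Calculate.RPNCalc (the evaluator is an assumption, not the project's implementation).

def toy_rpn_calc(tokens):
    # Minimal RPN evaluator, used only to show the token format;
    # the project does the real work in Calculate.RPNCalc.
    stack = []
    ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
           '*': lambda a, b: a * b, '/': lambda a, b: a / b}
    for tok in tokens:
        if tok in ops:
            b, a = stack.pop(), stack.pop()
            stack.append(ops[tok](a, b))
        else:
            stack.append(float(tok))
    return stack[0]

# f says: two variables named 'x' and 'y', then the RPN expression "x y + x *"
f = [2, 'x', 'y', 'x', 'y', '+', 'x', '*']
values = [3, 4]

nf = f.copy()
num = nf.pop(0)
variables = [nf.pop(0) for _ in range(num)]
tokens = [str(values[variables.index(t)]) if t in variables else t for t in nf]
print(tokens)                # ['3', '4', '+', '3', '*']
print(toy_rpn_calc(tokens))  # 21.0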
Example #15
def mag_chromatism():
    name = 'mag_chromatism'

    if name not in Materials.aber:
        Materials.aber[name] = {}

    for k in Materials.K[name]:
        if str(k[0]) + '_' + str(k[1]) not in Materials.aber[name]:
            Materials.K1 = k[0]
            Materials.K2 = k[1]
            gauss = Calculate.first_para()[-1]['L']
            light = Calculate.meri_off()
            y_d = -(light[-1]['L'] - gauss) * math.tan(
                math.radians(light[-1]['U']))

            for item in range(0, len(Materials.lens)):
                Materials.lens[item]['n'] = Materials.nf[item]
            Materials.extend = '_f'
            light = Calculate.meri_off()
            y_f = -(light[-1]['L'] - gauss) * math.tan(
                math.radians(light[-1]['U']))

            for item in range(0, len(Materials.lens)):
                Materials.lens[item]['n'] = Materials.nc[item]
            Materials.extend = '_c'
            light = Calculate.meri_off()
            y_c = -(light[-1]['L'] - gauss) * math.tan(
                math.radians(light[-1]['U']))

            Materials.aber[name][str(k[0]) + '_' + str(k[1])] = y_f - y_c
            Materials.basic['d height'] = y_d
            Materials.basic['F height'] = y_f
            Materials.basic['C height'] = y_c

    for item in range(0, len(Materials.lens)):
        Materials.lens[item]['n'] = Materials.nd[item]

    Materials.extend = ''
Example #16
def curvature():
    name = 'curvature'

    if name not in Materials.aber:
        Materials.aber[name + '_s'] = {}
        Materials.aber[name + '_t'] = {}
        Materials.aber['astigmatism'] = {}

    for k in Materials.K[name]:
        if str(k[0]) + '_' + str(k[1]) not in Materials.aber[name + '_s']:
            Materials.K1 = k[0]
            Materials.K2 = k[1]
            gauss = Calculate.first_para()[-1]['L']
            out = OffAxis.off_axis()
            light = Calculate.meri_off()[-1]
            cos_u = math.cos(math.radians(light['U']))

            Materials.aber[name + '_s'][str(k[0]) + '_' +
                                        str(k[1])] = out[0] * cos_u - gauss
            Materials.aber[name + '_t'][str(k[0]) + '_' +
                                        str(k[1])] = out[1] * cos_u - gauss
            Materials.aber['astigmatism'][str(k[0]) + '_' + str(k[1])] = \
                Materials.aber[name + '_t'][str(k[0]) + '_' + str(k[1])] - \
                Materials.aber[name + '_s'][str(k[0]) + '_' + str(k[1])]
Example #17
def main():
    log()

    global strings
    strings = get_yml("./strings.yml")
    Calculate.set_strings(strings)

    updater = Updater(cfg['agecalculator']['bottoken'])

    dp = updater.dispatcher

    dp.add_handler(CommandHandler(
        "start",
        start,
        pass_chat_data=True,
    ))
    dp.add_handler(MessageHandler(Filters.text, send, pass_chat_data=True))
    dp.add_handler(
        CallbackQueryHandler(try_button,
                             pass_chat_data=True,
                             pass_job_queue=True))

    updater.start_polling()
    updater.idle()
Example #18
def update_total(bot, job):
    update, chat_data, job_queue = job.context
    if "cur" in chat_data and chat_data[
            "cur"] == 'total' and chat_data['counter'] > 0:
        text = strings[chat_data["lang"]][
            "total"] + ":\n" + Calculate.total_time(chat_data)
        try:
            update.callback_query.message.edit_text(
                get_text(chat_data) + "\n\n" + text,
                parse_mode=ParseMode.MARKDOWN,
                reply_markup=get_result_keyboard(1, chat_data))
            chat_data['counter'] = chat_data['counter'] - 1
        except error.BadRequest:
            pass
        job_queue.run_once(update_total,
                           1,
                           context=[update, chat_data, job_queue])
Example #19
def Setting(ip, port):
    serverSock = socket(AF_INET, SOCK_STREAM)
    print("Socket started")
    serverSock.bind((ip, port))
    print("bind")
    serverSock.listen(1)
    print("listen")

    while (1):
        connectionSock, addr = serverSock.accept()

        print(str(addr), '- connection confirmed.')

        data = connectionSock.recv(1024).decode()
        data_2 = Calculate.in_data(data)
        data_2 = data_2.split(",")
        data_3 = Trilateration.Trilateration(data_2)  # apply the trilateration algorithm
        return data_3  # return the X, Y position coordinates
Example #20
def format_clb(Region_Id):
    response = Calculate.get_clb(Region_Id)
    if response['TotalCount'] != 0:
        result = []
        for line in response['LoadBalancerSet']:
            if line.get('Forward') == 1:
                Type = 'Application'
            else:
                Type = 'Classic'
            if line.get('Status') == 1:
                Status = 'Running'
            else:
                Status = 'Creating'
            data = (line.get('LoadBalancerId'), Region_Id,
                    line.get('LoadBalancerName'), Type, Status,
                    line.get('LoadBalancerVips')[0])
            result.append(data)
        return result
Example #21
 def BTNEquals(self):
     #Passes the information to the calculator module
     #Try method to prevent crashes
     try:
         self.ids.input.text = ""
         a = self.ids.result.text
         a = a.split()
         calc = []
         for x in a:
             if x != " ":
                 #add it
                 calc.append(x)
             else:
                 pass
         a = Calculate.calculate(calc)
         #a = Calcula.tor.SingularMethod(self.ids.result.text)
         self.ids.input.text = str(a)
     except:
         return
Example #22
def equalpress():
    # A try/except statement is used
    # to handle errors such as
    # zero-division errors.

    # Put that code inside the try block
    # which may generate the error
    try:
        global expression

        # calculate
        c = Calculate.Calculate(expression)
        total = str(c.process())
        equation.set(total)
        # reinitialize the expression variable
        # by empty string
        expression = ""

    except:

        equation.set(" error ")
        expression = ""
Example #23
def main():
    global turn, stand, player, dealer
    run = True
    n = Network.Network()
    player = eval(n.getPlayer())
    print(type(player))
    p1 = Player.Player(player)
    dealer = eval(n.send('dealer'))
    print(type(dealer))
    p2 = Player.Player(dealer)
    hit_button = Button.Button(light_grey, 50, 625, 200, 70, 'Hit')
    stand_button = Button.Button(light_grey, 300, 625, 200, 70, 'Stand')
    result = Calculate.Calculate(dealer, player)
    while run:
        pos = pygame.mouse.get_pos()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if hit_button.isOver(pos):
                    if turn % 2 == 0:
                        player.append(n.send('draw'))
                    turn += 1
                if turn % 2 == 1 and not result.handcheck():
                    dealer.append(n.send('draw'))
                    turn += 1
                if stand_button.isOver(pos):
                    stand = True
                    turn += 1
                    if stand:
                        n.send(result.handcal())

        redrawWindow(win, p1, p2, hit_button, stand_button, result)
        clock.tick(60)
Example #24
def tif_to_iq(
        # Calibration info
        poni_file=None, dspace_file=None, detector=None,
        calibration_imagefile=None, wavelength=None,
        # image info
        image_path=None, out_ending='',
        # Correction info
        solid=False, pol=True, polarization=0.95,
        # old_files info
        generate_mask=False, mask_file=None, initial_m=None,
        # integration info
        space='Q', resolution=None, average_method='median',
        plot_verbose=False):
    """
    Takes TIFF images to CHI files, including masking, integrating, and CHI
    writing

    Parameters
    ----------
    poni_file: str
        Location of poni file, a detector geometry file produced by pyFAI.
        If none is given a GUI prompt will ask for the file.
    dspace_file : str
        Location of the calibrant d-spacing file.  This is used to generate
        the poni file if no poni file is loaded.
    detector: str
        Name of the detector. This is used to generate
        the poni file if no poni file is loaded.
    calibration_imagefile : str
        Location of the calibration image.  This is used to generate
        the poni file if no poni file is loaded
    wavelength: float
        The wavelength of light in angstrom. This is used to generate
        the poni file if no poni file is loaded.
    image_path: str or list of str or tuple of str
        The image(s) to be masked and integrated.  If more than one image is
        passed via a list or tuple, sum the images together before proceeding.
    out_ending: str
        The ending to append to the output file name before the extension
    solid: bool
        If true, apply solid angle correction as calculated by pyFAI, else do not
    pol: bool
        If true, apply polarization angle correction
    polarization: float
        The polarization of the beam used in the polarization correction
    generate_mask: bool
        If true, generate mask via the mask writing algorithm
    mask_file: str
        Location of mask file
    initial_m: float
        Initial guess at ring threshold mask scale factor
    space: {'Q'}
        Eventually will support TTH as well
    resolution:
        The resolution in Q or TTH of the detector
    average_method: str or function
        The function to use in binned statistics to calculate the average
    plot_verbose: bool
        If true, generate a bunch of plots related to statistics of the rings

    Returns
    -------
    str:
        The name of the chi file
    str:
        The name of the mask, if generated
    """

    # load the calibration file/calibrate
    resolution = resolution if resolution is not None else .02
    a = IO.calibrate(poni_file, calibration_imagefile, dspace_file, wavelength,
                     detector)

    # load image
    xray_image, bulk_image_path_no_ext, image_path = IO.loadimage(image_path)

    # mask image, expect out an array of 1s and 0s which is the mask, if the
    # mask is 0 then mask that pixel
    mask_array = None
    if generate_mask is True:

        """This should handle all mask writing functions including binning,
        applying criteria, plotting, and writing the mask out as a numpy
        array where masked pixels are zero.  This should also give the
        option to combine the calculated mask with a previous user
        generated mask."""

        mask_array = Calculate.write_mask(xray_image, a, mask_file, initial_m,
                                          resolution)
        # write mask to FIT2D file for later use
        mask_file_name = IO.fit2d_save(mask_array, bulk_image_path_no_ext)
    elif mask_file is not None:
        # LOAD THE MASK
        mask_array = IO.loadmask(mask_file)
        # mask_array = np.abs(mask_array-1)
    if mask_array is None:
        mask_array = np.ones(np.shape(xray_image))

    if plot_verbose:
        plt.imshow(mask_array, cmap='cubehelix'), plt.colorbar()
        plt.savefig('/home/christopher/mask.png', bbox_inches='tight',
                    transparent=True)
        plt.show()

        plt.imshow(mask_array*xray_image, cmap='cubehelix'), plt.colorbar()
        plt.show()

    # correct for corrections
    corrected_image = Calculate.corrections(xray_image, geometry_object=a,
                                            solid=solid, pol=pol,
                                            polarization_factor=polarization)

    position_matrix = None
    # TODO: Add options for tth
    if space == 'Q':
        position_matrix = a.qArray(np.shape(xray_image)) / 10

    max_position = np.max(position_matrix)
    masked_position = position_matrix * mask_array

    # Integrate using the global average
    independant_out_array = np.arange(0, max_position + resolution, resolution)

    integrated = Calculate.statistics1d(corrected_image,
                                        position_matrix=masked_position,
                                        max=max_position, step=resolution,
                                        statistic=average_method)
    integrated[np.isnan(integrated)] = 0

    uncertainty_array = np.sqrt(
        Calculate.variance1d(corrected_image, position_matrix=masked_position,
                             max=max_position, step=resolution))

    uncertainty_array[np.isnan(uncertainty_array)] = 0

    if plot_verbose is True:
        no_mask_integrated = Calculate.statistics1d(corrected_image,
                                                    position_matrix=position_matrix,
                                                    max=max_position,
                                                    step=resolution,
                                                    statistic=average_method)
        integrated_mean = Calculate.statistics1d(corrected_image,
                                                 position_matrix=masked_position,
                                                 max=max_position,
                                                 step=resolution,
                                                 statistic='mean')
        fig, ax = plt.subplots()
        ax.plot(independant_out_array, integrated, 'g', label='Median I[Q]')
        ax.plot(independant_out_array, integrated_mean, 'b', label='Mean I[Q]')
        ax.plot(independant_out_array,
                ((integrated - integrated_mean) /
                 ((integrated + integrated_mean) / 2)) * 10e6,
                'k', label='((Median-Mean)/(Median+Mean)/2)x10^6')
        ax.legend(loc='upper right')
        plt.xlabel('Q (1/A)')
        plt.ylabel('Raw Counts')
        plt.savefig('/home/christopher/integrator.png', bbox_inches='tight',
                    transparent=True)
        plt.show()

        no_mask_uncertainty_percent_array = np.sqrt(
            Calculate.variance1d(corrected_image,
                                 position_matrix=position_matrix,
                                 max=max_position, step=resolution)) / no_mask_integrated
        fig, ax = plt.subplots()
        ax.plot(independant_out_array, integrated, 'g', label='I[Q]')
        ax.plot(independant_out_array, no_mask_integrated, 'k', label='No '
                                                                      'old_files I[Q]')
        ax.plot(independant_out_array, integrated - no_mask_integrated, 'r',
                label='Difference')
        legend = ax.legend(loc='upper right')
        plt.show()

        old_settings = np.seterr(divide='ignore')
        uncertainty_percent_array = uncertainty_array / integrated
        uncertainty_percent_array[np.isnan(uncertainty_percent_array)] = 0
        np.seterr(**old_settings)

        fig, ax = plt.subplots()
        ax.plot(independant_out_array, uncertainty_percent_array * 100, 'g',
                label='Sigma I[Q]')
        plt.xlabel('Q (1/A)')
        plt.ylabel('% uncertainty')
        legend = ax.legend(loc='upper right')
        plt.show()

        ring_std = Calculate.statistics1d(corrected_image,
                                          position_matrix=masked_position,
                                          max=max_position, step=resolution,
                                          statistic=np.std) / integrated

        ring_std_no_mask = Calculate.statistics1d(corrected_image,
                                                  position_matrix=position_matrix,
                                                  max=max_position,
                                                  step=resolution,
                                                  statistic=np.std) / no_mask_integrated
        ring_s = Calculate.statistics1d(corrected_image,
                                        position_matrix=masked_position,
                                        max=max_position, step=resolution,
                                        statistic=stats.skew)

        ring_s_no_mask = Calculate.statistics1d(corrected_image,
                                                position_matrix=position_matrix,
                                                max=max_position,
                                                step=resolution,
                                                statistic=stats.skew)
        fig, ax = plt.subplots()
        ax.plot(independant_out_array, ring_std, 'g',
                label='Ring Standard Deviation')
        ax.plot(independant_out_array, ring_std_no_mask, 'b',
                label='Ring Standard Deviation No old_files')
        legend = ax.legend(loc='upper right')
        plt.show()

        fig, ax = plt.subplots()
        ax.plot(independant_out_array, ring_s, 'g',
                label='Ring Skew')
        ax.plot(independant_out_array, ring_s_no_mask, 'b',
                label='Ring Skew No old_files')
        legend = ax.legend(loc='upper right')
        plt.show()

        # plt.plot(independant_out_array, test_uncertainty / integrated * 100)
        # plt.show()

        plt.plot(
            independant_out_array, integrated, 'g',
            independant_out_array, integrated - uncertainty_array, 'b',
            independant_out_array, integrated + uncertainty_array, 'r'
        )
        plt.show()

        # plt.plot(
        # independant_out_array, integrated, 'g',
        #     independant_out_array, integrated - test_uncertainty, 'b',
        #     independant_out_array, integrated + test_uncertainty, 'r'
        # )
        # plt.show()
    # Save file as chi file with uncertainties
    out_array = np.c_[independant_out_array, integrated, uncertainty_array]
    # print out_array.shape
    chi_filename = os.path.splitext(bulk_image_path_no_ext)[0] + out_ending
    out_file_name = IO.save_chi(out_array, a, chi_filename, mask_file, image_path)
    if generate_mask is True:
        return out_file_name, mask_file_name
    return out_file_name
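A hedged usage sketch of tif_to_iq as defined above. The file paths are placeholders, and the call assumes the project's IO and Calculate modules are importable; with generate_mask=True the function returns both the chi file name and the name of the generated mask.

chi_name, mask_name = tif_to_iq(
    poni_file='detector_geometry.poni',    # hypothetical pyFAI geometry file
    image_path='sample_image-00001.tif',   # hypothetical detector image
    generate_mask=True,
    space='Q',
    resolution=0.02,
    average_method='median',
    plot_verbose=False)
print(chi_name, mask_name)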
Example #25
def ripple_minima(chi_file, background_file, background_level, composition,
                  qmin, qmaxinst):
    pwd = os.getcwd()
    if os.path.exists(os.path.join(pwd, 'ripple_minima_test')) is False:
        os.mkdir(os.path.join(pwd, 'ripple_minima_test'))
    ripple_list = []
    gr_list = []
    qmax_min = 18.5
    qmax_max = qmaxinst
    qmax_step = .01
    print 'Start qmax refinement'
    for qmax in np.arange(qmax_min, qmax_max, qmax_step):
        print qmax
        gr_file, bg, q=Calculate.pdfgetx3(
            chi_file,
            background_file,
            background_level,
            composition,
            qmax,
            qmin,
            qmax,
            'QA',
            os.path.join(pwd, 'ripple_minima_test', '@b_ripple_test_'+str(
                qmax).ljust(5,'0')+'.@o'))
        gr_file = os.path.join(pwd, 'ripple_minima_test', os.path.split(
            os.path.splitext(chi_file)[0])[1]+'_ripple_test_'+str(qmax).ljust(5,'0')+'.gr')
        x, y = IO.load_gr_file(gr_file)
        w = y - np.convolve(y, np.ones(3)/3, 'same')
        ripple_sum = np.sum(abs(w))
        ripple_list.append(ripple_sum)
        gr_list.append(gr_file)
    t = np.arange(qmax_min, qmax_max, qmax_step)
    ripple_array = np.array(ripple_list)
    minima_index = signal.argrelextrema(ripple_array, np.less_equal,
                                        order=2*len(ripple_array)/100)[0]
    minima = ripple_array[minima_index]
    minima_q = t[minima_index]

    maxima_index = signal.argrelextrema(ripple_array, np.greater_equal,
                                        order=2*len(ripple_array)/100)[0]
    maxima = ripple_array[maxima_index]
    maxima_q = t[maxima_index]

    plt.plot(t, ripple_array, 'g')
    plt.plot(minima_q, minima, 'o', markerfacecolor='none', mec='r')
    plt.plot(maxima_q, maxima, 'o', markerfacecolor='none', mec='b')
    plt.title('Ripples in PDF')
    plt.xlabel('Qmax (1/A)')
    plt.ylabel('Ripple Cumulative Sum')
    plt.show()
    plt.cla()

    # data_list = []
    # key_list = []
    # for minima_q_point in minima_q:
    #     data = IO.load_gr_file([x for x in gr_list if str(qmax) in x] [0])
    #     data_list_element = data
    #     key_list_element = minima_q_point
    #     data_list.append(data_list_element)
    #     key_list.append(key_list_element)
    # plot_stack(data_list, key_list)

    ziped_minima = zip(minima_q, minima)
    while True:
        for q, value in ziped_minima:
            print 'q= ', q,'rippleness= ', value
        qmax= float(raw_input('Please pick a qmax.  qmax= '))
        gr_file = [x for x in gr_list if str(qmax) in x] [0]
        print gr_file
        x, y = IO.load_gr_file(gr_file)
        plt.plot(x, y)
        plt.show()
        if raw_input('Is this the file you wanted? y/[n] ') == 'y':
            break
    return qmax
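The "rippleness" used above is the cumulative absolute difference between the PDF and its own 3-point moving average. Below is a self-contained illustration of that metric on synthetic curves; the data and names are made up for demonstration only.

import numpy as np

def rippleness(y):
    # Same metric as in ripple_minima: sum of |y - 3-point moving average|
    return np.sum(np.abs(y - np.convolve(y, np.ones(3) / 3, 'same')))

r = np.linspace(0, 10, 500)
smooth = np.exp(-r / 5) * np.sin(2 * np.pi * r)
noisy = smooth + 0.05 * np.random.random(r.size)
print(rippleness(smooth), rippleness(noisy))  # the noisy curve scores higher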
Example #26
'''
Created on 2015-12-09

@author: Rhapsody
'''

import numpy as np
import pylab as pl
import GeneData
import Calculate

def func(x):
    return np.sin(x * 2.0 * np.pi)

n = 10
m = 20
sqare = 0.1
x, y0 = GeneData.average(n, 0.0, 1.0, func)
y1 = np.random.normal(0, sqare, n) + y0
_w = Calculate.descent(x, y1, m, 100000, 0.01, Calculate.func_df)

w = list(_w[0])

print 'w =', w

pl.plot(x, y0, label='Pure')
pl.plot(x, y1, 'o', label='Impurity')
_x, _y = Calculate.showLine(0.0, 1.0, w)
pl.plot(np.array(_x), np.array(_y), label='Around')

pl.legend()
pl.show()
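Example #26 fits a curve with m parameters to noisy samples of sin(2πx) using the project's gradient-descent routine (Calculate.descent). As a rough, self-contained analogue, the same fit can be done with a closed-form least-squares solve; numpy.polyfit below is a stand-in for the project's routine, and the degree is an arbitrary choice.

import numpy as np

def func(x):
    return np.sin(x * 2.0 * np.pi)

n, sigma = 10, 0.1
x = np.linspace(0.0, 1.0, n)
y1 = func(x) + np.random.normal(0, sigma, n)

coeffs = np.polyfit(x, y1, 5)   # least-squares stand-in for Calculate.descent
xs = np.linspace(0.0, 1.0, 200)
fit = np.polyval(coeffs, xs)
print(coeffs)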
Example #27
def write_pdf(chi_file=None, background_file=None, background_level=None,
              pdf_format='QA', output='@r.@o', qmax='statistics',
              composition=None, qmaxinst=0.0,
              qmin=0.0, relative_max_uncertainty=.90,
              plot_verbose=False, bg_refine_qmin_index=None,
              bg_refine_qmax_index=None):
    """
    Generates the G(r) and F(q) files, potentially using optimized values
    for the background subtraction and qmax determination

    Parameters
    ----------
    chi_file: str
        Location of the chi file to use as the foreground to generate the PDF
    background_file: str
        Location of the chi file to use as the background
    background_level: float
        The background level, if not set automatically generate the background
        scale
    pdf_format: str
        The format of the chi file
    output: str
        The naming convention for the output PDF
    qmax: float or {'statistics', 'ripple'}
        If a float, the qmax value to be used in the FFT.  If 'statistics',
        generate qmax by examining the statistical uncertainty of I(Q).  If
        'ripple', generate the PDFs at various qmax values and compare the
        rippleness between them
    composition: str
        The composition of the material to be studied
    qmaxinst: float
        The maximum reliable qmax generated by the detector, if 0.0 the max
        of the chi file
    qmin: float
        The minimum q to be used in the FFT
    relative_max_uncertainty: float [0,1]
        If qmax is 'statistics', the percentile of the uncertainty to accept
    plot_verbose: bool
        If true, generate plots
    bg_refine_qmin_index: int
        The lower bound on the range of values to fit between the background
        and foreground for determining the background level.
    bg_refine_qmax_index: int
        The upper bound on the range of values to fit between the background
        and foreground for determining the background level.

    Returns
    -------
    str:
        The file name of the output
    float:
        The background level
    float:
        The qmax
    int:
        The background refine lower bound
    int:
        The background refine upper bound
    """
    #Get composition
    if composition is None:
        composition = pdf_parameters.generate_composition()
    # load up sample I[Q]
    chi, chi_file = IO.load_chi_file(chi_file)
    if os.path.split(chi_file)[0] != '':
        os.chdir(os.path.split(chi_file)[0])
    sample_q, sample, uncertainty = chi[:, 0], chi[:, 1], chi[:, 2]

    if qmaxinst > np.amax(sample_q):
        qmaxinst = np.amax(sample_q)
    elif qmaxinst == 0:
        qmaxinst = np.amax(sample_q)
    # perform background subtraction
    if background_file != '' and background_level is None:
        # implies that you want to load a background file
        background_level, background_q, background, background_file,  \
        bg_refine_qmin_index, bg_refine_qmax_index = \
            pdf_parameters.background_subtract(sample, background_file,
                                               background_level,
                                               bg_refine_qmin_index=bg_refine_qmin_index,
                                               bg_refine_qmax_index=bg_refine_qmax_index)

        if plot_verbose is True:
            fig, ax = plt.subplots()
            ax.plot(sample_q, sample, 'g', label='Sample')
            ax.legend(loc='upper right')
            plt.title('Sample I(Q)')
            plt.xlabel('Q (1/A)')
            plt.ylabel('Raw Counts')
            plt.savefig('/home/christopher/sample.png',
                        bbox_inches='tight',
                        transparent=True)
            plt.show()

            fig, ax = plt.subplots()
            ax.plot(sample_q, background, 'b',
                    label='Background')
            ax.legend(loc='upper right')
            plt.title('Background (IQ)')
            plt.xlabel('Q (1/A)')
            plt.ylabel('Raw Counts')
            plt.savefig('/home/christopher/background.png',
                        bbox_inches='tight',
                    transparent=True)
            plt.show()

            fig, ax = plt.subplots()
            ax.plot(sample_q, sample, 'g', label='Sample')
            ax.plot(sample_q, background * background_level, 'b',
                    label='Scaled Background')
            ax.plot(sample_q, sample - background * background_level, 'k',
                    label='Sample-Background')
            ax.legend(loc='upper right')
            plt.title('Background Subtraction')
            plt.xlabel('Q (1/A)')
            plt.ylabel('Raw Counts')
            plt.savefig('/home/christopher/integrated_minus_background.png',
                        bbox_inches='tight',
                    transparent=True)
            plt.show()
    # Determine Qmax
    if qmax == 'statistics':
        qmax = pdf_parameters.qmax_statistics(sample_q, sample, uncertainty,
                                              relative_max_uncertainty)
    elif qmax == 'ripple':
        qmax = pdf_parameters.ripple_minima(chi_file, background_file,
                                            background_level, composition,
                                            qmin, qmaxinst)
    if plot_verbose is True:
        plt.plot(sample_q, uncertainty/sample * 100)
        plt.show()
    # PDFGETX3 call/ Generate PDF
    gr_file, background_level, qmax = Calculate.pdfgetx3(
        chi_file, background_file, background_level, composition, qmax, qmin,
        qmaxinst, pdf_format, output)
    return gr_file, background_level, qmax, bg_refine_qmin_index, bg_refine_qmax_index
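A hedged usage sketch of write_pdf as defined above. The chi file name and composition are placeholders, and the call assumes the project's IO, Calculate and pdf_parameters modules are importable; passing background_file='' skips the background subtraction, and qmax='statistics' derives qmax from the I(Q) uncertainty.

gr_name, bg_level, qmax, qmin_idx, qmax_idx = write_pdf(
    chi_file='sample_image-00001.chi',   # hypothetical integrated I(Q) file
    background_file='',                  # '' means no background is loaded
    composition='Ni',                    # hypothetical sample composition
    qmax='statistics',
    qmin=0.0,
    plot_verbose=False)
print(gr_name, bg_level, qmax)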
Example #28
def background_subtract(sample, background_file=None, background_level=None,
                        bg_refine_qmin_index=None, bg_refine_qmax_index=None):
    """
    Wrap the background subtraction optimization with file IO

    Parameters
    ----------
    sample: ndarray
        Sample I(Q) array
    background_file: str, optional
        The file name of the background file; if not specified, give an IO
        GUI to select the file, and if '' no file is loaded
    background_level: float, optional
        The background level; if none is given, calculate it via minimization
    bg_refine_qmin_index: int, optional
        The lower bound on the range of values to fit between the background
        and foreground for determining the background level
    bg_refine_qmax_index: int, optional
        The upper bound on the range of values to fit between the background
        and foreground for determining the background level

    Returns
    -------
    float:
        The background level
    array:
        The background q array
    array:
        The background array
    str:
        The name of the background file
    int:
        The background refine lower bound index
    int:
        The background refine upper bound index

    """
    #File IO
    background_data, background_file = IO.load_chi_file(
        background_file)
    #Unpack file IO
    if background_data is not None:
        background = background_data[:, 1]
        background_q = background_data[:, 0]
        percentile_peak_pick = np.where(
            background >= np.percentile(background,  98))[0]
        print len(percentile_peak_pick)
        print percentile_peak_pick[0]
        print percentile_peak_pick[-1]
        if len(percentile_peak_pick) >= 2:
            bg_refine_qmin_index = percentile_peak_pick[0]
            bg_refine_qmax_index = percentile_peak_pick[-1]
        else:

            if bg_refine_qmax_index is None:
                plt.plot(background)
                plt.plot(sample)
                plt.show()
                bg_refine_qmax_index = int(raw_input('High end of the '
                                                     'background to fit: '))
            if bg_refine_qmin_index is None:
                plt.plot(background)
                plt.plot(sample)
                plt.show()
                bg_refine_qmin_index = int(raw_input('Low end of the '
                                                     'background to fit: '))
        # Fit the background
        if background_level is None:
            background_level = Calculate.optimize_background_level(background,
                                                                   sample,
                                                                   bg_refine_qmin_index,
                                                                   bg_refine_qmax_index)
            print 'background level= ', background_level
    else:
        background = None
        background_q = None
    return background_level, background_q, background, background_file, \
           bg_refine_qmin_index, bg_refine_qmax_index
Example #29
# Syntax for import is:

# import modulename

#  OR

# import module1, module2, ... moduleN

# Here is a program that imports the module "Calculate.py" using the import statement.

# importing "Calculate.py" using the import statement.

import Calculate

# Getting user input for variables a and b.

a = int( input ( "\nEnter a value for \"a\" : " ) )

b = int( input ( "\nEnter a value for \"b\" : " ) )

# Accessing the functionality present in the module "Calculate" using the dot ( . ) operator.

Calculate.Add( a,b )

Calculate.Sub( a,b )

Calculate.Multi( a,b )

Calculate.Divide( a,b )

print( ) # Used to print new line for readability.
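For the script above to run, Calculate.py only needs to define the four functions being called. A minimal sketch of such a module follows; the message formatting is an assumption, since the original Calculate.py is not shown on this page.

# Calculate.py -- minimal sketch matching the calls Calculate.Add(a, b), etc.

def Add(a, b):
    print("Sum =", a + b)

def Sub(a, b):
    print("Difference =", a - b)

def Multi(a, b):
    print("Product =", a * b)

def Divide(a, b):
    print("Quotient =", a / b)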
Example #30
itr = 50
alpha= 20.0
#------------------------------------------

U = Start.start(l,N).cold_start() 

ll = 1

while (ll < itr+1):
    for s in range(l):
        for t in range(l):
            for r in range(2):
                U = Update.link(r, U, s, t, l, alpha, N)

    avp = Calculate.avplqt(U, l, N)
    # avp = Wilson11(U0)
    print avp
    plt.figure(1)
    plt.scatter(ll, avp)
    plt.show()

    ll = ll + 1
Example #31
#-------- DAY 53 "exercise" --------
import Calculate as cal
import datetime

x = datetime.datetime.now()
print(x.strftime('%c'), '\n---------------------')
print('We have the basic operations which are: +, -, * and /')
print('Examples for each one:')
print('1 + 8 =', cal.add(1, 8))
print('4 - 2 =', cal.subtract(4, 2))
print('6 * 6 =', cal.multiply(6, 6))
print('8 / 2 =', cal.divide(8, 2))
Example #32
import request_and_store
import Calculate
import graph
import translation

if __name__ == "__main__":
    language = input("Language choice: ")
    abr = translation.lang_abr(language)
    product = input(
        translation.translateText(
            abr,
            "Type of food you are looking for (Ex. pork, beef, fish, etc.): "))
    mode = input(
        translation.translateText(
            abr,
            "What do you care about the most? 1 for calories, 2 for cost, 3 for nutrients: "
        ))
    product = translation.translateText("en", product)

    while True:
        if mode != "1" and mode != "2" and mode != "3":
            mode = input("Invalid mode! Try again:")
        else:
            break
    request_and_store.main(product)
    Calculate.calculate_cost_and_write(abr, product)
    graph.main(mode, product)
Example #33
def ProcessSrcmod(fileName):
    print '\nStarting calculations for ' + fileName + '...\n'
    FSBFilesFolder = './' #path to folder with fsp file
    # Parameters for CFS calculation and visualization
    lambdaLame = 3e10 # First Lame parameter (Pascals)
    muLame = 3e10 # Second Lame parameter (shear modulus, Pascals)
    coefficientOfFriction = 0.4 # Coefficient of friction
    obsDepth = 0; # depth of observation coordinates just for disc visualization
    useUtm = True
    catalogType = 'REVIEWED'
    # Options are:
    # 'COMPREHENSIVE': most earthquakes. Not human reviewed
    # 'REVIEWED': slightly fewer earthquakes. Some human quality control
    # 'EHB': many fewer earthquakes. Human quality control and precise relocations
    captureDays = 365 # Consider ISC earthquakes for this many days after day of main shock
    nearFieldDistance = 100e3 # Keep only those ISC earthquakes within this distance of the SRCMOD epicenter
    spacingGrid = 5e3
    maxDepth = 50000
    zBuffer = -1.*np.arange(0, maxDepth+spacingGrid, spacingGrid)
    zBufferFillGridVec = -1.*(np.arange(spacingGrid, maxDepth+spacingGrid, spacingGrid)-spacingGrid/2.)

    # get exact time of earthquake
    with open('MainShockTimes.csv') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            if (row['earthquake'][2:-10] == fileName[1:-4]):
                ExactStart = row['time']
                datetime_Exact = datetime.datetime.strptime(ExactStart[:-4], '%Y-%m-%d %H:%M:%S')

    print 'Reading in SRCMOD fault geometry and slip distribution for this representation of the event...\n'
    EventSrcmod = Readsrcmod.ReadSrcmodFile(fileName, FSBFilesFolder)

    print 'Calculating regular grid over region inside fault buffer...\n'
    xBuffer, yBuffer, polygonBuffer = Calculate.calcFaultBuffer(EventSrcmod, nearFieldDistance)
    
    print 'Calculating fault buffer grid points...\n'
    xBufferFillGridVec, yBufferFillGridVec, gridcellsInBuffer = Calculate.calcBufferGridPoints(xBuffer, yBuffer, polygonBuffer, spacingGrid)

    print 'Generating unique grid identifiers and flattening observation arrays...\n'
    gridIdFlat, xBufferFillGridVecFlat, yBufferFillGridVecFlat, zBufferFillGridVecFlat = Calculate.getGridIdNames(xBufferFillGridVec, yBufferFillGridVec, zBufferFillGridVec)

    ti = time.time()
    print 'Calculating displacement vectors and stress tensors at observation coordinates...'
    DisplacementVectorBuffer, StrainTensorBuffer, StressTensorBuffer, Distances = Calculate.calcOkadaDisplacementStress(xBufferFillGridVecFlat, yBufferFillGridVecFlat, zBufferFillGridVecFlat, EventSrcmod, lambdaLame, muLame)
    print 'Calculations took ' + str(time.time()-ti) + ' seconds.\n'

    print 'Resolving Coulomb failure stresses on receiver planes...\n'
    Cfs, BigBig = Calculate.StressMasterCalc(EventSrcmod, StressTensorBuffer, StrainTensorBuffer, DisplacementVectorBuffer, coefficientOfFriction)

    print 'Getting start/end dates...'
    startDateTime = EventSrcmod['datetime']
    datetime_ExactStart = datetime_Exact + datetime.timedelta(seconds=1)
    assert datetime_ExactStart - datetime.timedelta(hours=36) < startDateTime < datetime_ExactStart + datetime.timedelta(hours=36)
    endDateTime = datetime_ExactStart + datetime.timedelta(days=captureDays) - datetime.timedelta(seconds=1)
    
    ti = time.time()
    print 'Reading in ISC data for capture days after the date of the SRCMOD event...'
    Catalog = ISCFunctions.getIscEventCatalog(datetime_ExactStart, endDateTime, EventSrcmod, catalogType)
    print 'ISC retrieval and time/quality filtering took ' + str(time.time()-ti) + ' seconds in total.\n'

    print 'Events within the fault buffer...\n'
    Catalog = ISCFunctions.getNearFieldIscEventsBuffer(Catalog, EventSrcmod, polygonBuffer)
    
    ti = time.time()
    print 'Binning the events by grid cell and assembling magnitudes, times, counts, etc...\n'
    Counts = ISCFunctions.binNearFieldIscEventsBuffer(Catalog, EventSrcmod, polygonBuffer, gridcellsInBuffer, zBuffer)
    
    print 'Binning ISC events took ' + str(time.time()-ti) + ' seconds.\n'

    BigBig['x'] = xBufferFillGridVecFlat
    BigBig['y'] = yBufferFillGridVecFlat
    BigBig['z'] = zBufferFillGridVecFlat
    BigBig['grid_id'] = gridIdFlat
    BigBig['grid_aftershock_count'] = Counts

    print 'Writing output CSV file...\n'
    ti = time.time()
    fileNameReturn = WriteOutput.WriteCSV(fileName, BigBig)
    print 'Writing output took ' + str(time.time()-ti) + ' seconds.\n'

    return
Example #34
turtle.goto(100 - screen.window_width() / 2, 10 - screen.window_height() / 2)
turtle.pendown()

turtle.hideturtle()
turtle.speed(0)

# Calculate the angles
alpha = sigma
epsilon = ro / 2
beta = epsilon + 180 / n
gamma = 180 - (alpha + beta)

count = 1
row = []

a_side, c_side = Calculate.solve_triangle(alpha, beta, gamma, b_side)

print('Triangle', count, [alpha, beta, gamma, a_side, b_side, c_side])

# compute the angle of the opposite triangle

angle = Calculate.get_angle(a_side, c_side, 180 - gamma - alpha - ro)

for i in range(n):  # add 1 if making a lampshade
    # row.append(turtle.clone())
    turtle.forward(b_side)
    row.append(turtle.clone())
    turtle.left(ro)

# Make a copy of the row array
# to draw the bottom triangles if making a lampshade
Example #35
# Let's see about renaming a module in Python.

# It is possible to import a module under a specific user-defined name, called an "alias name".

# It is possible to access the functionality of the module through its alias name, which is declared while importing the module.

# Syntax for renaming a module is:

# import moduleName as aliasName

# Here is a program that imports the module "Calculate.py" using an alias name.

import Calculate as Cal

# Getting user input for variables a and b.

a = int( input ( "\nEnter a value for \"a\" : " ) )

b = int( input ( "\nEnter a value for \"b\" : " ) )

# Accessing the functionality present in the module "Calculate" through its alias name, using the dot ( . ) operator.

Cal.Add( a, b) 

Cal.Multi( a,b )

print( ) # Used to print new line for readability.
Example #36
    Session_state.index = Session_state.index + 1

#Inspect order
if st.button("Analyze"):
    st.write(df.iloc[Session_state.index - 1])
    data = MarketstackApi.requestData(Session_state.index - 1, df)
    x = Graph.axis_generate(data)[0]
    y = Graph.axis_generate(data)[1]
    xlabel = Graph.axis_generate(data)[2]

    myline = np.linspace(0, 7, 200)  #Curve line
    mymodel = np.poly1d(np.polyfit(x, y, 3))  #3rd degree model

    #Draw Matplotlib chart
    fig = plt.figure()
    plt.xticks(x, np.flip(xlabel))
    plt.title('Weighted-price of ' +
              df.iloc[Session_state.index - 1].StockCode)
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.scatter(x, y, label='Weighted prices')
    plt.plot(myline, mymodel(myline), label='Best fit line')
    # plt.scatter(3,df.iloc[Session_state.index-1].Price, marker = 'o', color='r', label = "Order Price")
    plt.legend(loc=4)
    st.pyplot(fig)

    #Find Critical values:
    min_max = Graph.find_min_max(mymodel, y)
    Score = Calculate.calculateScore(Session_state.index - 1, df, min_max)
    st.subheader("This order rating is:")
    st.subheader(Score)
Example #37
import Calculate

print("Please enter two numbers for the basic calculator operations....")
print()
num = int(input("Enter number one please: "))
num2 = int(input("Enter number two please: "))
print()
print("Calculating the operations.................")
print()

calculate = Calculate.Calculate(num, num2)

print(f"The sum is: {calculate.sum()}")
print(f"The subtraction is: {calculate.subtraction()}")
print(f"The Divide is: {calculate.divide()}")
print(f"The Multiply is: {calculate.multiply()}")

print()
print("Calculation finished.................")
print("Thanks....")
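For the script above, Calculate.py needs a Calculate class that takes two numbers and exposes sum, subtraction, divide and multiply methods. A minimal sketch of such a class follows; the method bodies are assumptions based only on the calls shown here.

# Calculate.py -- minimal sketch of the class used above
class Calculate:
    def __init__(self, num, num2):
        self.num = num
        self.num2 = num2

    def sum(self):
        return self.num + self.num2

    def subtraction(self):
        return self.num - self.num2

    def divide(self):
        return self.num / self.num2

    def multiply(self):
        return self.num * self.num2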
Example #38
import numpy as np
import matplotlib.pyplot as plt

import IO
import Calculate


poni_file = '/media/Seagate_Backup_Plus_Drive/Mar2014/Ni/17_21_24_NiSTD_300K-00002.poni'
geometry_object = IO.loadgeometry(poni_file)

# load image
xray_image = IO.loadimage(
    '/media/Seagate_Backup_Plus_Drive/Mar2014/Ni/17_21_24_NiSTD_300K-00002.tif')

# generate radial array based on image shape
# TODO: use a more Q/angle based system
Radi = geometry_object.rArray(xray_image.shape)
angles = geometry_object.chiArray(xray_image.shape)

# create integer array of radi by division by the pixel resolution
roundR = np.around(Radi / geometry_object.pixel1).astype(int)

# define the maximum radius for creation of numpy arrays
# make maxr into an integer so it can be used as a counter
maxr = int(np.ceil(np.amax(roundR)))

pols = []
print 'start opts'
for r in range(0, maxr, 100):
    pols.append(
        Calculate.optomize_polarization(r, xray_image, geometry_object, roundR, angles))
pols = np.array(pols)
plt.plot(pols)
plt.show()