Example #1
def ztrack1_to_autoscale_element(lattice,
                                 t,
                                 p,
                                 z,
                                 autoscale_element=None,
                                 workdir=None,
                                 ccs='wcs'):

    lat_temp = Lattice('temp')

    # Truncate the lattice at the autoscale element, or use the full lattice
    if (autoscale_element):
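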
        stop_element_index = lattice.element_index(autoscale_element.name)
        lat_temp._elements = lattice._elements[:stop_element_index]
        z_stop = get_auto_element_z_ccs_beg(autoscale_element)
        stop_name = autoscale_element.name

    else:

        lat_temp._elements = lattice._elements
        z_stop = lattice[-1].z_end_ccs
        stop_name = 'END'

    # Write the GPT input file to a temporary or user-supplied working directory
    if (workdir is None):
        tempdir = tempfile.TemporaryDirectory(dir=workdir)
        gpt_file = os.path.join(tempdir.name, f'track_to_{stop_name}.gpt.in')
        workdir = tempdir.name

    else:

        gpt_file = os.path.join(workdir, f'track_to_{stop_name}.gpt.in')

    lat_temp.write_gpt_lines(ztrack1_template(gpt_file), output_file=gpt_file)

    G = GPT(gpt_file, workdir=workdir, use_tempdir=False, ccs_beg=ccs)
    return G.track1_in_ccs(z, z_stop, pz0=p, t0=t, ccs=ccs)
Example #2
def get_gpt(gpt_key, gpt_engine, gpt_temperature, gpt_max_tokens,
            example_file):
    ''' define a gpt object

    Args:
        gpt_key: key under "Secret" here https://beta.openai.com/developer-quickstart
        gpt_engine: language model identifier (see https://beta.openai.com/api-ref for valid values)
        gpt_temperature: sampling temperature - higher values mean the model will take more risks
        gpt_max_tokens: how many tokens to complete to, up to a maximum of 512
        example_file: csv file of question/answer pairs used to prime the model

    Returns:
        gpt: newly created gpt object primed with the examples, or None if an error occurs

    '''
    try:
        # create a new gpt object and register the API key
        openai.api_key = gpt_key
        gpt = GPT(engine=gpt_engine,
                  temperature=gpt_temperature,
                  max_tokens=gpt_max_tokens)
        # add examples
        # load dataframe from example file
        path = get_path()
        example_df = pd.read_csv(os.path.join(path, example_file))
        for index, row in example_df.iterrows():
            # print(row['question'],row['answer'])
            gpt.add_example(Example(row['question'], row['answer']))
    except Exception as error:
        print('ERROR', error)
    else:
        return gpt
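
A minimal usage sketch for the function above (not part of the original source): the key, engine, and csv path are placeholders, and get_top_reply is the gpt-3-sandbox helper used elsewhere on this page.

if __name__ == '__main__':
    # placeholder values; the csv must contain 'question' and 'answer' columns
    gpt = get_gpt(gpt_key='sk-...',
                  gpt_engine='davinci',
                  gpt_temperature=0.5,
                  gpt_max_tokens=100,
                  example_file='examples.csv')
    if gpt is not None:
        print(gpt.get_top_reply('your question here'))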
Example #3
def segment(prompt):
    openai.api_key = os.environ.get("GPT_KEY")

    gpt = GPT(temperature=0,
              max_tokens=500,
              append_output_prefix_to_query=False,
              output_prefix="")

    gpt.add_example(
        Example(
            """George Washington (February 22, 1732[b] – December 14, 1799) was an American political leader, military general, statesman, and Founding Father who served as the first president of the United States from 1789 to 1797. Previously, he led Patriot forces to victory in the nation's War for Independence. He presided at the Constitutional Convention of 1787, which established the U.S. Constitution and a federal government. Washington has been called the "Father of His Country" for his manifold leadership in the formative days of the new nation.

Washington's first public office was serving as official Surveyor of Culpeper County, Virginia from 1749 to 1750. Subsequently, he received his initial military training and a command with the Virginia Regiment during the French and Indian War. He was later elected to the Virginia House of Burgesses and was named a delegate to the Continental Congress, where he was appointed Commanding General of the Continental Army. He commanded American forces, allied with France, in the defeat and surrender of the British during the Siege of Yorktown. He resigned his commission after the Treaty of Paris in 1783.

Washington played a key role in adopting and ratifying the Constitution and was then twice elected president by the Electoral College. He implemented a strong, well-financed national government while remaining impartial in a fierce rivalry between cabinet members Thomas Jefferson and Alexander Hamilton. During the French Revolution, he proclaimed a policy of neutrality while sanctioning the Jay Treaty. He set enduring precedents for the office of president, including the title "Mr. President", and his Farewell Address is widely regarded as a pre-eminent statement on republicanism.
""", "[\"George Washington was born on February 22, 1732.\", \" George Washington died on December 14, 1799\", \"George Washington was an American political leader and military general, statesman.\", \"George Wahington was the Founding Father who served as the first president of the United States from 1789 to 1797.\", \"George Washington led the Patriot forces to victory in the nation's War for Independence. \", \"George Washington presided at the Constitutional Convention of 1787, which established the U.S. Constitution and a federal government.\", \"George Washington has been called the 'Father of His Country' for his manifold leadership in the formative days of the new nation.\", \"George Washington was serving as official Surveyor of Culpeper County, Virginia from 1749 to 1750. \", \"George Washington was later elected to the Virginia House of Burgesses and was named a delegate to the Continental Congress.\", \"George Washington was appointed Commanding General of the Continental Army.\", \"George Washington commanded American forces, allied with France, in the defeat and surrender of the British during the Siege of Yorktown. \", \"George Washington resigned his commission after the Treaty of Paris in 1783.\", \"George Washington played a key role in adopting and ratifying the Constitution \", \"George Washington was then twice elected president by the Electoral College.\", \"George Washington during the French Revolution, proclaimed a policy of neutrality while sanctioning the Jay Treaty. \"]"
        ))

    sjExample = Example(
        """Apple was founded in April 1976 by the late Steve Jobs and Ronald Wayne. Wayne would leave the Apple company only three months after its founding to take a job with Atari, and Jobs would end up buying the company from him.""",
        "[\"Apple was founded in April 1976 by the late Steve Jobs and Ronald Wayne.\", \"Ronald Wayne would leave the Apple company only three months after its founding to take a job with Atari, and Jobs would end up buying the company from him.\"]"
    )
    gpt.add_example(sjExample)

    gpt.add_example(
        Example(
            """Adenosine triphosphate (ATP) is an organic compound and hydrotrope that provides energy to drive many processes in living cells, such as muscle contraction, nerve impulse propagation, condensate dissolution, and chemical synthesis. Found in all known forms of life, ATP is often referred to as the "molecular unit of currency" of intracellular energy transfer. [2] When consumed in metabolic processes such as cellular respiration, it converts either to adenosine diphosphate (ADP) or to adenosine monophosphate (AMP). Other processes regenerate ATP so that the human body recycles its own body weight equivalent in ATP each day.[3] It is also a precursor to DNA and RNA, and is used as a coenzyme.""",
            "[\"ATP is an organic compound and hydrotrope.\", \"ATP provides energy to drive many processes in living cells.\", \"ATP is often referred to as the 'molecular unit of currency' of intracellular energy transfer.\"]"
        ))

    out = gpt.submit_request(prompt).choices[0].text
    return out
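
A hedged sketch of calling segment, assuming GPT_KEY is set in the environment; the sample paragraph is illustrative only.

if __name__ == '__main__':
    paragraph = ('Marie Curie was a physicist and chemist who conducted '
                 'pioneering research on radioactivity. She was the first '
                 'woman to win a Nobel Prize.')
    # expected to return a JSON-style list of simple factual sentences
    print(segment(paragraph))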
Example #4
    def parse(self, test):
        gpt3 = GPT(engine="davinci", temperature=0.9, max_tokens=200)

        for instance in self.data["data"]:
            gpt3.add_example(Example(get_content(instance, self.c_type), get_answer(instance)))
        output = gpt3.get_top_reply(test).replace("output: ", "").strip()

        output = output[: output.find("}") + 1]
        output = output.replace("'", '"')
        key_list = [list(x["answer"].keys()) for x in self.data["data"]]
        return output, key_list
Example #5
def main():
    openai_key = os.getenv("OPENAI_KEY")
    set_openai_key(openai_key)
    gpt = GPT(temperature=0.2, max_tokens=10)

    train, validation = combustion_reactions[:4], combustion_reactions[4:]

    for example in train:
        gpt.add_example(Example(example.lhs, example.rhs))

    for idx, example in enumerate(validation):
        print(idx + 1)
        print(f"GPT prediction: {gpt.get_top_reply(example.lhs)}")
        print(f"Actual: {example.rhs}")
        print("==========================")
Example #6
def main():
    with open('GPT_SECRET_KEY.json') as f:
        data = json.load(f)
    openai.api_key = data["API_KEY"]
    gpt = GPT(engine="davinci",
              temperature=0.0,
              max_tokens=100,
              output_prefix="output (positive/neutral/negative):")
    # filename is assumed to be defined at module scope in the original file
    all_data = pd.read_csv(filename, header=0, encoding="utf8", sep=":->")
    all_data['Sentiment_class_label'] = all_data[
        'Sentiment_class_label'].apply(change_labels)
    for row in range(all_data.shape[0]):
        gpt.add_example(
            Example(all_data['Phrase_text'][row],
                    all_data['Sentiment_class_label'][row]))
    return gpt.get_prime_text()
Example #7
def init_gpt(_gpt_info_dict, _prompt_design_info_dict):
    _engine = _gpt_info_dict["engine"]
    _temperature = _gpt_info_dict["temperature"]
    _max_tokens = _gpt_info_dict["max_tokens"]
    _promptType = _prompt_design_info_dict["prompt_type"]
    # _input_prefix = _prompt_design_dict["input_prefix"]
    # _input_suffix = ""
    # _output_prefix = _prompt_design_dict["output_prefix"]
    # _output_suffix = ""
    if _promptType == "long":
        _stop = _gpt_info_dict["stop_sequence"]
    elif _promptType == "short":
        _stop = "\n"
    _frequency_penalty = _gpt_info_dict["frequency_penalty"]
    _presence_penalty = _gpt_info_dict["presence_penalty"]
    _top_p = _gpt_info_dict["top_p"]
    _n = _gpt_info_dict["n"]
    _logprobs = _gpt_info_dict["logprobs"]
    with open('arousal/GPT_SECRET_KEY.json') as f:
        data = json.load(f)
    openai.api_key = data["API_KEY"]
    _gpt_instance = GPT(engine=_engine,
                        temperature=_temperature,
                        max_tokens=_max_tokens,
                        stop=_stop,
                        top_p=_top_p,
                        n=_n,
                        frequency_penalty=_frequency_penalty,
                        presence_penalty=_presence_penalty,
                        logprobs=_logprobs)
    return _gpt_instance
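
The two dicts are only read for the keys below; a sketch of plausible inputs (values are illustrative, not from the source):

gpt_info_dict = {
    'engine': 'davinci',
    'temperature': 0.7,
    'max_tokens': 64,
    'stop_sequence': '\n\n',   # only used when prompt_type is 'long'
    'frequency_penalty': 0.0,
    'presence_penalty': 0.0,
    'top_p': 1.0,
    'n': 1,
    'logprobs': None,
}
prompt_design_info_dict = {'prompt_type': 'short'}  # 'short' uses '\n' as the stop sequence
gpt_instance = init_gpt(gpt_info_dict, prompt_design_info_dict)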
Example #8
def write_output(engine, temp, max_tokens, all_data, df_used):
    #def write_output(engine, temp, max_tokens, all_data, num_training_per_cate):
    gpt = GPT(engine=engine,
              temperature=temp,
              max_tokens=max_tokens,
              output_prefix="Sentiment:",
              append_output_prefix_to_query=True)
    #gpt = add_examples(gpt, df_used)
    out_df = write_prompts(all_data, gpt)
    out_df = out_df[["Sentiment_class_label", "gpt_output", "Phrase_text"]]
    out_df["gpt_output"] = out_df["gpt_output"].apply(lambda x: map_back(x))
    print(out_df)
    #out_df.drop("Arousal_class_label", axis = 1, inplace = True)
    #accuracy = ((np.sum(out_df['matched'])) / (out_df.shape[0])) * 100
    #print("Accuracy: {}".format(accuracy))
    out_df.to_csv('outfinal_descriptive_zeroshot.txt',
                  header=False,
                  index=None,
                  sep=",",
                  mode='a')
    #print("###" * 50)
    #print("Instances GPT-3 categorised as Mixed:")
    #mixed_df.to_csv("outzeroshot.txt", sep = " ", mode = "a")
    line_prepender('outfinal_descriptive_zeroshot.txt',
                   "VALENCE,AROUSAL,SENTENCE")
Example #9
def write_output(engine, temp, max_tokens, all_data, num_training_per_cate):
    gpt = GPT(engine=engine,
              temperature=temp,
              max_tokens=max_tokens,
              output_prefix="Sentiment:")
    #gpt = add_examples(gpt, df_used)
    out_df, mixed_df = write_prompts(all_data, gpt)
    #print(out_df)
    out_df.drop("Sentiment_class_label", axis=1, inplace=True)
    accuracy = (np.sum(out_df['matched']) / out_df.shape[0]) * 100
    print("Accuracy: {}".format(accuracy))
    out_df.to_csv('outzeroshot.txt',
                  header=['PHRASE_TEXT', 'GPT_OUTPUT', 'MATCHED'],
                  index=None,
                  sep=" ",
                  mode='a')
    print("###" * 50)
    print("Instances GPT-3 categorised as Mixed:")
    mixed_df.to_csv("outzeroshot.txt", sep=" ", mode="a")
    line_prepender('outzeroshot.txt',
                   "ACCURACY: {:.2f}, TEMPERATURE: {}".format(accuracy, temp))
Example #10
def write_output(engine, temp, max_tokens, all_data, df_used):
    gpt = GPT(engine=engine,
              temperature=temp,
              max_tokens=max_tokens,
              output_prefix="Sentiment:",
              append_output_prefix_to_query=True)
    gpt = add_examples(gpt, df_used)
    out_df, mixed_df = write_prompts(all_data, gpt)
    print("Mixed DF")
    print(mixed_df)
    SVM_df = out_df[["Phrase_text", "gpt_output", "SVM_matched"]]
    NV_df = out_df[["Phrase_text", "gpt_output", "Natural_matched"]]
    SV_df = out_df[["Phrase_text", "gpt_output", "ServRep_matched"]]
    #out_df.drop("Sentiment_class_label", axis = 1, inplace = True)
    SVM_accuracy = ((np.sum(SVM_df['SVM_matched'])) / (SVM_df.shape[0])) * 100
    print("SVM Accuracy: {}".format(SVM_accuracy))
    NV_accuracy = ((np.sum(NV_df['Natural_matched'])) / (NV_df.shape[0])) * 100
    print("Natural Validation Accuracy: {}".format(NV_accuracy))
    SV_accuracy = ((np.sum(SV_df['ServRep_matched'])) / (SV_df.shape[0])) * 100
    print("ServRep Accuracy: {}".format(SV_accuracy))
    SVM_df.to_csv('GPTvsSVM_mixed.txt',
                  header=['PHRASE_TEXT', 'GPT_OUTPUT', 'MATCHED'],
                  index=None,
                  sep=" ",
                  mode='a')
    #print("###" * 50)
    #print("Instances GPT-3 categorised as Mixed:")
    #mixed_df.to_csv("outzeroshot.txt", sep = " ", mode = "a")
    line_prepender(
        'GPTvsSVM_mixed.txt',
        "SVM_ACCURACY: {:.2f}, TEMPERATURE: {}".format(SVM_accuracy, temp))
    NV_df.to_csv('GPTvsNatural_mixed.txt',
                 header=['PHRASE_TEXT', 'GPT_OUTPUT', 'MATCHED'],
                 index=None,
                 sep=" ",
                 mode='a')
    line_prepender(
        'GPTvsNatural_mixed.txt',
        "NATURAL_ACCURACY: {:.2f}, TEMPERATURE: {}".format(NV_accuracy, temp))
    SV_df.to_csv('GPTvsServRep_mixed.txt',
                 header=['PHRASE_TEXT', 'GPT_OUTPUT', 'MATCHED'],
                 index=None,
                 sep=" ",
                 mode='a')
    line_prepender(
        'GPTvsServRep_mixed.txt',
        "SERVREP_ACCURACY: {:.2f}, TEMPERATURE: {}".format(SV_accuracy, temp))
Example #11
def track_on_axis(element,
                  t,
                  p,
                  xacc=6.5,
                  GBacc=12,
                  dtmin=1e-15,
                  dtmax=1e-8,
                  n_screen=1,
                  workdir=None):

    if (workdir is None):
        tempdir = tempfile.TemporaryDirectory(dir=workdir)
        gpt_file = os.path.join(tempdir.name, f'{element.name}.gpt.in')
        workdir = tempdir.name

    else:

        gpt_file = os.path.join(workdir, f'{element.name}.gpt.in')

    element.write_element_to_gpt_file(ztrack1_template(gpt_file))

    G = GPT(gpt_file,
            ccs_beg=element.ccs_beg,
            workdir=workdir,
            use_tempdir=False)
    G.set_variables({
        'xacc': xacc,
        'GBacc': GBacc,
        'dtmin': dtmin,
        'dtmax': dtmax
    })

    # Distance from the ccs origin to the element entrance along the beamline
    z_beg = np.linalg.norm(element._ccs_beg_origin - element._p_beg)

    G.track1_in_ccs(z_beg=z_beg,
                    z_end=z_beg + element.length,
                    ccs=element.ccs_beg,
                    pz0=p,
                    t0=t,
                    weight=1,
                    status=1,
                    species='electron',
                    xacc=xacc,
                    GBacc=GBacc,
                    n_screen=n_screen)

    return G
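
A hedged call sketch, assuming element is a lattice element with the attributes used above (name, ccs_beg, length, _ccs_beg_origin, _p_beg); the momentum value is a placeholder.

G = track_on_axis(element, t=0.0, p=1.0e6)   # p is the initial pz (units assumed eV/c)
print(G.screen[-1]['mean_z'])                # reference particle z at the final screen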
Example #12
File: main.py Project: dsgelab/RiskDemo
import os
import json
import itertools

import openai
from flask import Flask
from sqlalchemy import create_engine
from sklearn.metrics.pairwise import cosine_similarity
import transformers
from sentence_transformers import SentenceTransformer, LoggingHandler
from gpt import GPT, Example

engine = create_engine('postgresql://gpt3')

app = Flask(__name__, static_url_path='')
openai.api_key = os.getenv("OPENAI_API_KEY")

# available engines: davinci, curie, babbage, ada
gpt = GPT(engine="curie", temperature=0, max_tokens=300)

gpt.add_example(
    Example(
        'I am a 78-year-old man with angina. What is my risk of having heart failure?',
        'angina, heart failure, 78, male'))
gpt.add_example(
    Example(
        'What is my risk of having angina if I am a 53-year-old woman with a history of stroke?',
        'stroke, angina, 53, female'))
gpt.add_example(
    Example('I am at 40 with angina. What is my risk of having headache?',
            'angina, headache, 40, na'))
gpt.add_example(
    Example('I am a guy at 47 with migraine. What is my risk of stroke?',
Example #13
def run_gpt_with_particlegroup(settings=None,
                               gpt_input_file=None,
                               input_particle_group=None,
                               workdir=None,
                               use_tempdir=True,
                               gpt_bin='$GPT_BIN',
                               timeout=2500,
                               auto_phase=False,
                               verbose=False,
                               gpt_verbose=False,
                               asci2gdf_bin='$ASCI2GDF_BIN'):
    """
    Run gpt with particles from ParticleGroup. 
    
        settings: dict with keys that are in gpt input file.    
        
    """

    # Call simpler evaluation if there is no input_particle_group:
    if (input_particle_group is None):
        return run_gpt(settings=settings,
                       gpt_input_file=gpt_input_file,
                       workdir=workdir,
                       use_tempdir=use_tempdir,
                       gpt_bin=gpt_bin,
                       timeout=timeout,
                       verbose=verbose)

    if (verbose):
        print('Run GPT with ParticleGroup:')

    unit_registry = UnitRegistry()

    # Make gpt and generator objects
    G = GPT(gpt_bin=gpt_bin,
            input_file=gpt_input_file,
            initial_particles=input_particle_group,
            workdir=workdir,
            use_tempdir=use_tempdir)
    G.timeout = timeout
    G.verbose = verbose

    # Set inputs (normalize settings so the membership tests below are safe)
    settings = settings or {}
    for k, v in settings.items():
        G.set_variable(k, v)

    if ('final_charge' in settings):
        raise ValueError(
            'final_charge is deprecated, please specify value and units instead.'
        )

    # Run
    if (auto_phase):

        if (verbose):
            print('\nAuto Phasing >------\n')
        t1 = time.time()

        # Create the distribution used for phasing
        if (verbose):
            print('****> Creating initial distribution for phasing...')

        phasing_beam = get_distgen_beam_for_phasing_from_particlegroup(
            input_particle_group, n_particle=10, verbose=verbose)
        phasing_particle_file = os.path.join(G.path,
                                             'gpt_particles.phasing.gdf')
        write_gpt(phasing_beam,
                  phasing_particle_file,
                  verbose=verbose,
                  asci2gdf_bin=asci2gdf_bin)

        if (verbose):
            print('<**** Created initial distribution for phasing.\n')

        G.write_input_file()  # Write the unphased input file

        phased_file_name, phased_settings = gpt_phasing(
            G.input_file,
            path_to_gpt_bin=G.gpt_bin[:-3],
            path_to_phasing_dist=phasing_particle_file,
            verbose=verbose)
        G.set_variables(phased_settings)
        t2 = time.time()

        if (verbose):
            print(f'Time Elapsed: {t2-t1} sec.')
            print('------< Auto Phasing\n')

    # If here, either phasing successful, or no phasing requested
    G.run(gpt_verbose=gpt_verbose)

    if ('final_charge:value' in settings and 'final_charge:units' in settings
            and len(G.screen) > 0):
        final_charge = settings[
            'final_charge:value'] * unit_registry.parse_expression(
                settings['final_charge:units'])
        final_charge = final_charge.to('coulomb').magnitude
        clip_to_charge(G.screen[-1], final_charge, make_copy=False)

    if ('final_radius:value' in settings and 'final_radius:units' in settings
            and len(G.screen) > 0):
        final_radius = settings[
            'final_radius:value'] * unit_registry.parse_expression(
                settings['final_radius:units'])
        final_radius = final_radius.to('meter').magnitude
        take_range(G.screen[-1], 'r', 0, final_radius)

    if (input_particle_group['sigma_t'] == 0.0):
        # Initial distribution is a tout
        if (G.output['n_tout'] > 0):
            G.output['particles'].insert(0, input_particle_group)
            G.output['n_tout'] = G.output['n_tout'] + 1
    else:
        # Initial distribution is a screen
        if (G.output['n_screen'] > 0):
            G.output['particles'].insert(G.output['n_tout'],
                                         input_particle_group)
            G.output['n_screen'] = G.output['n_screen'] + 1

    return G
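
A sketch of a typical call, assuming ParticleGroup from openPMD-beamphysics; the particle file, input file, and setting name are placeholders (any settings key must exist in the gpt input file).

from pmd_beamphysics import ParticleGroup

particles = ParticleGroup('initial_particles.h5')   # placeholder particle file
G = run_gpt_with_particlegroup(settings={'space_charge': 0},  # hypothetical key
                               gpt_input_file='gpt.in',
                               input_particle_group=particles,
                               auto_phase=True,
                               verbose=True)
final_screen = G.screen[-1] if len(G.screen) > 0 else None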
Example #14
def multirun_gpt_with_particlegroup(settings=None,
                                    gpt_input_file=None,
                                    input_particle_group=None,
                                    workdir=None,
                                    use_tempdir=True,
                                    gpt_bin='$GPT_BIN',
                                    timeout=2500,
                                    auto_phase=False,
                                    verbose=False,
                                    gpt_verbose=False,
                                    asci2gdf_bin='$ASCI2GDF_BIN'):
    """
    Run gpt with particles from ParticleGroup. 
    
        settings: dict with keys that are in gpt input file.    
        
    """

    unit_registry = UnitRegistry()

    # An input_particle_group is required here:
    if (input_particle_group is None):
        raise ValueError('Must supply input_particle_group')

    if (settings is None):
        raise ValueError('Must supply settings')

    if (verbose):
        print('Run GPT with ParticleGroup:')

    if ('clipping_charge' in settings):
        raise ValueError(
            'clipping_charge is deprecated, please specify value and units instead.'
        )
    if ('final_charge' in settings):
        raise ValueError(
            'final_charge is deprecated, please specify value and units instead.'
        )
    if ('t_restart' not in settings):
        raise ValueError('t_restart must be supplied')

    t_restart = settings['t_restart']

    if ('restart_file' not in settings):
        # Make gpt and generator objects
        G = GPT(gpt_bin=gpt_bin,
                input_file=gpt_input_file,
                initial_particles=input_particle_group,
                workdir=workdir,
                use_tempdir=use_tempdir)
        G.timeout = timeout
        G.verbose = verbose

        # Set inputs
        if settings:
            for k, v in settings.items():
                G.set_variable(k, v)
        else:
            raise ValueError('Must supply settings')

        G.set_variable('multi_run', 0)
        if (auto_phase):

            if (verbose):
                print('\nAuto Phasing >------\n')
            t1 = time.time()

            # Create the distribution used for phasing
            if (verbose):
                print('****> Creating initial distribution for phasing...')

            phasing_beam = get_distgen_beam_for_phasing_from_particlegroup(
                input_particle_group, n_particle=10, verbose=verbose)
            phasing_particle_file = os.path.join(G.path,
                                                 'gpt_particles.phasing.gdf')
            write_gpt(phasing_beam,
                      phasing_particle_file,
                      verbose=verbose,
                      asci2gdf_bin=asci2gdf_bin)

            if (verbose):
                print('<**** Created initial distribution for phasing.\n')

            G.write_input_file()  # Write the unphased input file

            phased_file_name, phased_settings = gpt_phasing(
                G.input_file,
                path_to_gpt_bin=G.gpt_bin[:-3],
                path_to_phasing_dist=phasing_particle_file,
                verbose=verbose)
            G.set_variables(phased_settings)
            t2 = time.time()

            if (verbose):
                print(f'Time Elapsed: {t2-t1} sec.')
                print('------< Auto Phasing\n')

        G.set_variable('multi_run', 1)
        G.set_variable('last_run', 2)
        G.set_variable('t_start', 0.0)
        G.set_variable('t_restart', t_restart)

        # If here, either phasing successful, or no phasing requested
        G.run(gpt_verbose=gpt_verbose)
    else:
        G = GPT()
        G.load_archive(settings['restart_file'])
        if settings:
            for k, v in settings.items():
                G.set_variable(k, v)

    # Remove touts and screens that are after t_restart
    t_restart_with_fudge = t_restart + 1.0e-18  # slightly larger than t_restart to avoid floating-point comparison problems
    G.output['n_tout'] = np.count_nonzero(
        G.stat('mean_t', 'tout') <= t_restart_with_fudge)
    G.output['n_screen'] = np.count_nonzero(
        G.stat('mean_t', 'screen') <= t_restart_with_fudge)
    for p in reversed(G.particles):
        if (p['mean_t'] > t_restart_with_fudge):
            G.particles.remove(p)

    G_all = G  # rename it, and then overwrite G

    if (verbose):
        print(f'Looking for tout at t = {t_restart}')
    restart_particles = get_screen_data(G,
                                        tout_t=t_restart,
                                        use_extension=False,
                                        verbose=verbose)[0]

    if ('clipping_charge:value' in settings
            and 'clipping_charge:units' in settings):
        clipping_charge = settings[
            'clipping_charge:value'] * unit_registry.parse_expression(
                settings['clipping_charge:units'])
        clipping_charge = clipping_charge.to('coulomb').magnitude
        clip_to_charge(restart_particles, clipping_charge, make_copy=False)

    G = GPT(gpt_bin=gpt_bin,
            input_file=gpt_input_file,
            initial_particles=restart_particles,
            workdir=workdir,
            use_tempdir=use_tempdir)
    G.timeout = timeout
    G.verbose = verbose

    for k, v in G_all.input["variables"].items():
        G.set_variable(k, v)

    G.set_variable('multi_run', 2)
    G.set_variable('last_run', 2)
    G.set_variable('t_start', t_restart)
    G.run(gpt_verbose=gpt_verbose)

    G_all.output['particles'][G_all.output['n_tout']:G_all.
                              output['n_tout']] = G.tout
    G_all.output['particles'] = G_all.output['particles'] + G.screen
    G_all.output['n_tout'] = G_all.output['n_tout'] + G.output['n_tout']
    G_all.output['n_screen'] = G_all.output['n_screen'] + G.output['n_screen']

    if ('final_charge:value' in settings and 'final_charge:units' in settings
            and len(G_all.screen) > 0):
        final_charge = settings[
            'final_charge:value'] * unit_registry.parse_expression(
                settings['final_charge:units'])
        final_charge = final_charge.to('coulomb').magnitude
        clip_to_charge(G_all.screen[-1], final_charge, make_copy=False)

    if (input_particle_group['sigma_t'] == 0.0):
        # Initial distribution is a tout
        if (G_all.output['n_tout'] > 0):
            # Don't include the cathode if there are no other screens. Screws up optimizations of "final" screen when there is an error
            G_all.output['particles'].insert(0, input_particle_group)
            G_all.output['n_tout'] = G_all.output['n_tout'] + 1
    else:
        # Initial distribution is a screen
        if (G_all.output['n_screen'] > 0):
            # Don't include the cathode if there are no other screens. Screws up optimizations of "final" screen when there is an error
            G_all.output['particles'].insert(G_all.output['n_tout'],
                                             input_particle_group)
            G_all.output['n_screen'] = G_all.output['n_screen'] + 1

    return G_all
Example #15
def run_gpt_with_distgen(settings=None,
                         gpt_input_file=None,
                         distgen_input_file=None,
                         workdir=None,
                         use_tempdir=True,
                         gpt_bin='$GPT_BIN',
                         timeout=2500,
                         auto_phase=False,
                         verbose=False,
                         gpt_verbose=False,
                         asci2gdf_bin='$ASCI2GDF_BIN'):
    """
    Run gpt with particles generated by distgen. 
    
        settings: dict with keys that can appear in a gpt or distgen Generator input file.
        
    Example usage:
        G = run_gpt_with_distgen({'lspch':False},
                       gpt_input_file='$LCLS_LATTICE/gpt/models/gunb_eic/gpt.in',
                       distgen_input_file='$LCLS_LATTICE/distgen/models/gunb_gaussian/gunb_gaussian.json',
                       verbose=True,
                       timeout=None
                      )        
        
    """

    # Call simpler evaluation if there is no generator:
    if not distgen_input_file:
        return run_gpt(settings=settings,
                       gpt_input_file=gpt_input_file,
                       workdir=workdir,
                       use_tempdir=use_tempdir,
                       gpt_bin=gpt_bin,
                       timeout=timeout,
                       verbose=verbose)

    if (verbose):
        print('Run GPT with Distgen:')

    # Make gpt and generator objects
    G = GPT(gpt_bin=gpt_bin,
            input_file=gpt_input_file,
            workdir=workdir,
            use_tempdir=use_tempdir)
    G.timeout = timeout
    G.verbose = verbose

    # Distgen generator
    gen = Generator(verbose=verbose)
    f = full_path(distgen_input_file)
    distgen_params = yaml.safe_load(open(f))

    # Set inputs
    if settings:
        G, distgen_params = set_gpt_and_distgen(G,
                                                distgen_params,
                                                settings,
                                                verbose=verbose)

    # Link particle files
    particle_file = os.path.join(G.path, G.get_dist_file())

    if (verbose):
        print('Linking particle files, distgen output will point to -> "' +
              os.path.basename(particle_file) + '" in working directory.')

    G.set_dist_file(particle_file)

    if ('output' in distgen_params and verbose):
        print('Replacing Distgen output params')

    distgen_params['output'] = {'type': 'gpt', 'file': particle_file}

    if (verbose):
        print('\nDistgen >------\n')
    # Configure distgen
    gen.parse_input(distgen_params)

    # Run
    beam = gen.beam()
    write_gpt(beam, particle_file, verbose=verbose, asci2gdf_bin=asci2gdf_bin)

    if (verbose):
        print('------< Distgen\n')

    if (auto_phase):

        if (verbose):
            print('\nAuto Phasing >------\n')
        t1 = time.time()

        # Create the distribution used for phasing
        if (verbose):
            print('****> Creating initial distribution for phasing...')

        phasing_beam = get_distgen_beam_for_phasing(beam,
                                                    n_particle=10,
                                                    verbose=verbose)
        phasing_particle_file = os.path.join(G.path,
                                             'gpt_particles.phasing.gdf')
        write_gpt(phasing_beam,
                  phasing_particle_file,
                  verbose=verbose,
                  asci2gdf_bin=asci2gdf_bin)

        if (verbose):
            print('<**** Created initial distribution for phasing.\n')

        G.write_input_file()  # Write the unphased input file
        phased_file_name, phased_settings = gpt_phasing(
            G.input_file,
            path_to_gpt_bin=G.gpt_bin[:-3],
            path_to_phasing_dist=phasing_particle_file,
            verbose=verbose)
        G.set_variables(phased_settings)
        t2 = time.time()
        if (verbose):
            print(f'Time Elapsed: {t2-t1} sec.')
            print('------< Auto Phasing\n')

    G.run(gpt_verbose=gpt_verbose)

    return G
Example #16
def autoscale1(lattice,
               t=0,
               p=1e-15,
               workdir=None,
               ztrack1_through=True,
               verbose=True,
               n_screen=200):

    ts = []
    ps = []
    zs = []
    ss = []

    runs = []

    auto_elements = [
        element for element in lattice._elements
        if (element.type in ['Map1D_TM', 'Map25D_TM', 'Sectormagnet'])
    ]

    if (len(auto_elements) < 1):

        if (verbose):
            print('autoscale1: no cavities to phase')
            print(f'\n> Tracking: BEG:END')

        ts.append(t)
        ps.append(p)
        zs.append(lattice[0].z_beg_ccs)
        ss.append(lattice[0].s_beg)

        fparticle = ztrack1_to_autoscale_element(lattice,
                                                 0,
                                                 p,
                                                 lattice[0].z_beg_ccs,
                                                 workdir=None)
        assert fparticle is not None, f'Particle tracking from BEG to END failed.'
        assert np.abs(
            fparticle.screen[-1]['mean_z'] - lattice[-1].z_end_ccs
        ) < 1e-14, f'Error tracking to END: particle was not located at cavity entrance.'

        runs.append(fparticle)

        current_t = fparticle.screen[-1]['mean_t']
        current_p = fparticle.screen[-1]['mean_p']
        current_z = fparticle.screen[-1]['mean_z']
        current_s = fparticle.screen[-1]['mean_z']

        ts.append(current_t)
        ps.append(current_p)
        zs.append(current_z)
        ss.append(current_s)
        runs.append(fparticle)

        if (verbose):
            print(f'   energy gain: {p2e(current_p)-p2e(p)} eV.')

        if (ztrack1_through):

            if (workdir is None):
                tempdir = tempfile.TemporaryDirectory(dir=workdir)
                gpt_file = os.path.join(tempdir.name, f'track_lattice.gpt.in')
                workdir = tempdir.name

            else:

                gpt_file = os.path.join(workdir, f'gpt.temp.in')

            lattice.write_gpt_lines(ztrack1_template(gpt_file),
                                    output_file=gpt_file)

            G = GPT(gpt_file, workdir=workdir, use_tempdir=False)
            G = G.track1_in_ccs(lattice[0].z_beg_ccs,
                                lattice[-1].z_end_ccs,
                                pz0=p,
                                t0=t,
                                n_screen=n_screen)

            return (ts, ps, zs, ss, runs, G)

    # Only here if lattice contains rf_elements and bends

    # Check that auto_elements do not overlap:
    for ii, cav in enumerate(auto_elements[:-1]):
        if (ii + 1 < len(auto_elements)):
            next_cav = auto_elements[ii + 1]
            assert cav.s_end <= next_cav.s_beg, f'Autophasing Error: cavities {cav.name} and {next_cav.name} overlap and cannot be phased.'

    current_t = t
    current_p = p
    current_z = lattice[0].s_beg
    current_s = lattice[0].s_beg

    ts.append(current_t)
    ps.append(current_p)
    zs.append(current_z)
    ss.append(current_s)

    current_ccs = lattice[0].ccs_beg

    if (current_z < auto_elements[0].z_beg_ccs):

        print(current_z, auto_elements[0].z_beg_ccs)

        if (verbose):
            print(f'\n> Tracking: BEG:{auto_elements[0].name}')

        fparticle = ztrack1_to_autoscale_element(lattice,
                                                 current_t,
                                                 current_p,
                                                 current_z,
                                                 auto_elements[0],
                                                 workdir=workdir)
        print(fparticle.screen[-1]['mean_z'], auto_elements[0].z_beg_ccs)
        assert fparticle is not None, f'Particle tracking from BEG to {auto_elements[0].name} failed.'
        assert np.abs(
            fparticle.screen[-1]['mean_z'] - auto_elements[0].z_beg_ccs
        ) < 1e-14, f'Error tracking to {auto_elements[0].name}: particle was not located at cavity entrance.'

        runs.append(fparticle)

        current_t = fparticle.screen[-1]['mean_t']
        current_p = fparticle.screen[-1]['mean_p']
        current_z = fparticle.screen[-1]['mean_z']
        current_s = fparticle.screen[-1]['mean_z']

        if (verbose):
            print(f'   energy gain: {p2e(current_p)-p2e(p)} eV.')

    # Autoscale first element
    for ii, auto_element in enumerate(auto_elements):

        #print(current_z, auto_element.z_beg_ccs)

        assert np.abs(
            current_z - get_auto_element_z_ccs_beg(auto_element)
        ) < 1e-14, f'Error Phasing {auto_element.name}: particle was not located at cavity entrance.'

        ts.append(current_t)
        ps.append(current_p)
        zs.append(current_z)

        # phase
        t1 = time.time()
        run = autoscale1_element(current_t,
                                 current_p,
                                 auto_element,
                                 verbose=True)
        current_ccs = auto_element.ccs_end
        t2 = time.time()

        runs.append(run)

        current_t = run.screen[-1]['mean_t']
        current_p = run.screen[-1]['mean_p']
        current_z = get_auto_element_z_ccs_end(auto_element)
        current_s = get_auto_element_s_end(auto_element)

        ts.append(current_t)
        ps.append(current_p)
        zs.append(current_z)
        ss.append(current_s)

        if (ii + 1 < len(auto_elements)):  # Track to next cavity
            next_auto_element = auto_elements[ii + 1]
            msg = f'\n> Tracking: {auto_element.name}:{next_auto_element.name}'
        elif (auto_element.name != lattice[-1].name):
            next_auto_element = None
            msg = f'\n> Tracking: {auto_element.name}:END'
        else:
            break

        if (verbose):
            print(msg)

        fparticle = ztrack1_to_autoscale_element(lattice,
                                                 current_t,
                                                 current_p,
                                                 current_z,
                                                 next_auto_element,
                                                 workdir=workdir,
                                                 ccs=current_ccs)
        runs.append(fparticle)

        if (next_auto_element):

            assert fparticle is not None, f'Particle tracking from {auto_element.name} to {auto_elements[ii+1].name} failed.'

            print(fparticle.screen[-1]['mean_z'],
                  get_auto_element_z_ccs_beg(auto_elements[ii + 1]))

            assert np.abs(
                fparticle.screen[-1]['mean_z'] -
                get_auto_element_z_ccs_beg(auto_elements[ii + 1])
            ) < 1e-14, f'Error scaling {auto_element.name}: particle was not located at next element entrance after tracking.'
        else:
            assert fparticle is not None, f'Particle tracking from {auto_element.name} to END failed.'

        current_t = fparticle.screen[-1]['mean_t']
        current_p = fparticle.screen[-1]['mean_p']
        current_z = fparticle.screen[-1]['mean_z']

        if (verbose):
            print(
                f'   relative energy gain: {(p2e(current_p)-p2e(ps[-1]))/p2e(ps[-1])}.'
            )

    if (ztrack1_through):

        if (workdir is None):
            tempdir = tempfile.TemporaryDirectory(dir=workdir)
            gpt_file = os.path.join(tempdir.name, f'track_lattice.gpt.in')
            workdir = tempdir.name

        else:

            gpt_file = os.path.join(workdir, f'gpt.temp.in')

        lattice.write_gpt_lines(ztrack1_template(gpt_file),
                                output_file=gpt_file)

        G = GPT(gpt_file, workdir=workdir, use_tempdir=False)
        G = G.track1_in_ccs(zs[0],
                            lattice[-1].z_end_ccs,
                            pz0=p,
                            t0=t,
                            n_screen=n_screen)

    return (ts, ps, zs, ss, runs, G)
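
A sketch of unpacking autoscale1's return value; lattice is assumed to be the same kind of Lattice object used in Example #1.

ts, ps, zs, ss, runs, G = autoscale1(lattice, t=0, p=1e-15, verbose=True)
print(f'momentum after each autoscaled element: {ps}')   # same units as p
print(f'final screen z: {G.screen[-1]["mean_z"]}')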
Example #17
# Author: Nícolas A. Ramos.

import json
import openai
from pocketsphinx import LiveSpeech
from gpt import GPT, Example

# Load the API key (assumes the GPT_SECRET_KEY.json layout used in other
# examples here; the original referenced an undefined `data` variable)
with open('GPT_SECRET_KEY.json') as f:
    data = json.load(f)
openai.api_key = data["API_KEY"]

# Definition of gpt
gpt_ = GPT(engine="davinci", temperature=0.5, max_tokens=100)

# Trainer (add_example is a method of the GPT instance, not a module-level function)
gpt_.add_example(Example('INPUT_ONE', 'OUTPUT_ONE'))
gpt_.add_example(Example('INPUT_TWO', 'OUTPUT_TWO'))
gpt_.add_example(Example('INPUT_THREE', 'OUTPUT_THREE'))
gpt_.add_example(Example('INPUT_FOUR', 'OUTPUT_FOUR'))
gpt_.add_example(Example('INPUT_FIVE', 'OUTPUT_FIVE'))

# Capture speech and submit each recognized phrase to GPT
for phrase in LiveSpeech():
    output = gpt_.submit_request(str(phrase))
    print(output.choices[0].text)
Example #18
import json
import openai

with open('GPT_SECRET_KEY.json') as f:
    data = json.load(f)

openai.api_key = data["API_KEY"]

from gpt import GPT
from gpt import Example

gpt = GPT(engine="davinci", temperature=0.5, max_tokens=100)

gpt.add_example(
    Example('Fetch unique values of DEPARTMENT from Worker table.',
            'Select distinct DEPARTMENT from Worker;'))
gpt.add_example(
    Example(
        'Print the first three characters of FIRST_NAME from Worker table.',
        'Select substring(FIRST_NAME,1,3) from Worker;'))
gpt.add_example(
    Example(
        "Find the position of the alphabet ('a') in the first name column 'Amitabh' from Worker table.",
        "Select INSTR(FIRST_NAME, BINARY'a') from Worker where FIRST_NAME = 'Amitabh';"
    ))
gpt.add_example(
    Example(
        "Print the FIRST_NAME from Worker table after replacing 'a' with 'A'.",
        "Select REPLACE(FIRST_NAME,'a','A') from Worker;"
    ))
gpt.add_example(
Example #19
def autophase1(lattice,
               t=0,
               p=1e-15,
               z=None,
               workdir=None,
               ztrack1_through=True,
               verbose=True,
               n_screen=200):

    ts = []
    ps = []
    zs = []

    runs = []

    rf_elements = [
        element for element in lattice._elements
        if (element.type in ['Map1D_TM', 'Map25D_TM'])
    ]

    if (len(rf_elements) < 1):

        if (verbose):
            print('autophase1: no cavities to phase')
            print(f'\n> Tracking: BEG:END')

        ts.append(t)
        ps.append(p)
        zs.append(lattice[0].z_beg_ccs)

        fparticle = ztrack1_to_autoscale_element(lattice,
                                                 0,
                                                 p,
                                                 lattice[0].z_beg_ccs,
                                                 workdir=None)
        assert fparticle is not None, f'Particle tracking from BEG to END failed.'
        assert np.abs(
            fparticle.screen[-1]['mean_z'] - lattice[-1].z_end_ccs
        ) < 1e-14, f'Error tracking to END: particle was not located at cavity entrance.'

        runs.append(fparticle)

        current_t = fparticle.screen[-1]['mean_t']
        current_p = fparticle.screen[-1]['mean_p']
        current_z = fparticle.screen[-1]['mean_z']

        ts.append(current_t)
        ps.append(current_p)
        zs.append(current_z)
        runs.append(fparticle)

        if (verbose):
            print(f'   energy gain: {p2e(current_p)-p2e(p)} eV.')

        if (ztrack1_through):

            if (workdir is None):
                tempdir = tempfile.TemporaryDirectory(dir=workdir)
                gpt_file = os.path.join(tempdir.name, f'track_lattice.gpt.in')
                workdir = tempdir.name

            else:

                gpt_file = os.path.join(workdir, f'gpt.temp.in')

            lattice.write_gpt_lines(ztrack1_template(gpt_file),
                                    output_file=gpt_file)

            G = GPT(gpt_file, workdir=workdir, use_tempdir=False)
            G = G.track1_in_ccs(lattice[0].z_beg_ccs,
                                lattice[-1].z_end_ccs,
                                pz0=p,
                                t0=t,
                                n_screen=n_screen)

            return (ts, ps, zs, runs, G)

    current_t = t
    current_p = p
    current_z = lattice[0].z_beg_ccs

    z_start = current_z

    if (current_z < rf_elements[0].z_beg_ccs):

        if (verbose):
            print(f'\n> Tracking: BEG:{rf_elements[0].name}')

        fparticle = ztrack1_to_autoscale_element(lattice,
                                                 current_t,
                                                 current_p,
                                                 current_z,
                                                 rf_elements[0],
                                                 workdir=None)
        assert fparticle is not None, f'Particle tracking from BEG to {rf_elements[0].name} failed.'
        assert np.abs(
            fparticle.screen[-1]['mean_z'] - rf_elements[0].z_beg_ccs
        ) < 1e-14, f'Error tracking to {rf_elements[0].name}: particle was not located at cavity entrance.'

        runs.append(fparticle)

        current_t = fparticle.screen[-1]['mean_t']
        current_p = fparticle.screen[-1]['mean_p']
        current_z = fparticle.screen[-1]['mean_z']

        if (verbose):
            print(f'   energy gain: {p2e(current_p)-p2e(p)} eV.')

    # Check that rf_elements do not overlap:
    for ii, cav in enumerate(rf_elements[:-1]):
        if (ii + 1 < len(rf_elements)):
            next_cav = rf_elements[ii + 1]
            assert cav.z_end_ccs <= next_cav.z_beg_ccs, f'Autophasing Error: cavities {cav.name} and {next_cav.name} overlap and cannot be phased.'

    # Autophase first cavity
    for ii, rf_element in enumerate(rf_elements):

        assert np.abs(
            current_z - rf_element.z_beg_ccs
        ) < 1e-14, f'Error Phasing {rf_element.name}: particle was not located at cavity entrance.'

        ts.append(current_t)
        ps.append(current_p)
        zs.append(current_z)

        # phase
        t1 = time.time()
        run = rf_element.autophase(t=current_t,
                                   p=current_p,
                                   workdir=workdir,
                                   verbose=verbose)
        t2 = time.time()

        runs.append(run)

        current_t = run.screen[-1]['mean_t']
        current_p = run.screen[-1]['mean_p']
        current_z = run.screen[-1]['mean_z']

        ts.append(current_t)
        ps.append(current_p)
        zs.append(current_z)

        if (ii + 1 < len(rf_elements)):  # Track to next cavity
            next_rf_element = rf_elements[ii + 1]
            msg = f'\n> Tracking: {rf_element.name}:{next_rf_element.name}'
        elif (rf_element.name != lattice[-1].name):
            next_rf_element = None
            msg = f'\n> Tracking: {rf_element.name}:END'
        else:
            break

        if (verbose):
            print(msg)

        fparticle = ztrack1_to_autoscale_element(lattice,
                                                 current_t,
                                                 current_p,
                                                 current_z,
                                                 next_rf_element,
                                                 workdir=workdir)
        runs.append(fparticle)

        if (next_rf_element):

            assert fparticle is not None, f'Particle tracking from {rf_element.name} to {rf_elements[ii+1].name} failed.'
            assert np.abs(
                fparticle.screen[-1]['mean_z'] - rf_elements[ii + 1].z_beg_ccs
            ) < 1e-14, f'Error Phasing {rf_element.name}: particle was not located at cavity entrance after tracking to cavity.'
        else:
            assert fparticle is not None, f'Particle tracking from {rf_element.name} to END failed.'

        current_t = fparticle.screen[-1]['mean_t']
        current_p = fparticle.screen[-1]['mean_p']
        current_z = fparticle.screen[-1]['mean_z']

        if (verbose):
            print(
                f'   relative energy gain: {(p2e(current_p)-p2e(ps[-1]))/p2e(ps[-1])}.'
            )

    if (ztrack1_through):

        if (workdir is None):
            tempdir = tempfile.TemporaryDirectory(dir=workdir)
            gpt_file = os.path.join(tempdir.name, f'track_lattice.gpt.in')
            workdir = tempdir.name

        else:

            gpt_file = os.path.join(workdir, f'track_lattice.gpt.in')

        lattice.write_gpt_lines(ztrack1_template(gpt_file),
                                output_file=gpt_file)

        G = GPT(gpt_file, workdir=workdir, use_tempdir=False)
        G = G.track1_in_ccs(z_start,
                            lattice[-1].z_end_ccs,
                            pz0=p,
                            t0=t,
                            n_screen=n_screen)

    return (ts, ps, zs, runs, G)
Example #20
    def track_ref(self,
                  t0=0,
                  p0=1e-15,
                  xacc=6.5,
                  GBacc=5.5,
                  dtmin=1e-14,
                  dtmax=1e-8,
                  Ntout=100,
                  workdir=None):

        dz_ccs_beg = np.linalg.norm(self.p_beg - self._ccs_beg_origin)

        # Fringe-field extension: nonzero only when the bend field b1 is set
        if (np.abs(self._b1) > 0):
            dz_fringe = 10.0 / self._b1
        else:
            dz_fringe = 0

        settings = {
            'xacc': xacc,
            'GBacc': GBacc,
            'dtmin': dtmin,
            'dtmax': dtmax,
            'Ntout': Ntout,
            'ZSTART':
            -2 * np.sign(dz_ccs_beg - dz_fringe) * dz_ccs_beg - dz_fringe
        }

        particle = single_particle(z=dz_ccs_beg - dz_fringe,
                                   pz=p0,
                                   t=0,
                                   weight=1,
                                   status=1,
                                   species=self.species)

        if (workdir is None):
            tempdir = tempfile.TemporaryDirectory(dir=workdir)
            gpt_file = os.path.join(tempdir.name,
                                    f'track_to_{self.name}.gpt.in')
            workdir = tempdir.name

        else:

            gpt_file = os.path.join(workdir, f'{self.name}.gpt.in')

        self.write_element_to_gpt_file(basic_template(gpt_file))

        G = GPT(gpt_file,
                initial_particles=particle,
                ccs_beg=self.ccs_beg,
                workdir=workdir,
                use_tempdir=False)
        G.set_variables(settings)
        G.track1_to_z(z_end=dz_fringe,
                      ds=self.length + 2 * dz_fringe,
                      ccs_beg=self.ccs_beg,
                      ccs_end=self.ccs_end,
                      z0=dz_ccs_beg - dz_fringe,
                      t0=t0,
                      pz0=p0,
                      species=self.species,
                      s_screen=self.s_end + dz_fringe)

        #os.remove(gpt_file)

        return G
Example #21
from key import *
import glob
import openai
from gpt import GPT
from gpt import Example


openai.api_key = key
gpt = GPT(
    engine="davinci",
    temperature=0.5,
    output_prefix="Output: \n\n",
    max_tokens=100
)

# add some code examples
"""for file in glob.glob("examples/*"):
    title = file.replace("_", " ")
    with open(f"{file}", "r") as f:
        code = f.read()
    gpt.add_example(Example(title, code))"""


premise1 = "Here is a premise: While at Skidmore , Smith also designed an even taller mixed-use skyscraper , the Burj Dubai , now under construction in the United Arab Emirates ."
prompt1 = premise1 + " " + \
    "The phrase  Smith designed a skyscraper  is entailed by which part of the premise: "
output1 = "Smith also designed an even taller mixed-use skyscraper"

prompt6 = premise1 + " " + \
    "The phrase  the Burj Dubai is a skyscraper  is entailed by which part of the premise: "
output6 = "an even taller mixed-use skyscraper , the Burj Dubai"
Example #22
import json
import openai
import numpy as np
import pandas as pd

from gpt import set_openai_key
from gpt import GPT
from gpt import Example

set_openai_key()

gpt_sql = GPT(engine="davinci", temperature=0.5, max_tokens=100)
gpt_js = GPT(engine="davinci", temperature=0.5, max_tokens=100)
gpt_python = GPT(engine="davinci", temperature=0.5, max_tokens=100)
gpt_php = GPT(engine="davinci", temperature=0.5, max_tokens=100)

#df = pd.read_csv("states_all.csv")
#df = pd.DataFrame({"Gender": ["boy", "boy", "boy", "boy", "boy", "girl", "girl", "girl", "girl"],
#                   "Division": ["one", "one", "one", "two", "two",
#                                "one", "one", "two", "two"],
#                   "Marks": [50, 55, 67, 85, 44, 84, 65, 56, 87]})
#print(df)


def train_sql_model():
    gpt_sql.add_example(
        Example('Fetch unique values of DEPARTMENT from Worker table.',
                'Select distinct DEPARTMENT from Worker;'))
    gpt_sql.add_example(
        Example(
            'Print the first three characters of FIRST_NAME from Worker table.',
Example #23
def index():
    return render_template('index.html', title=title, context=context)


@app.route('/query', methods=['POST'])
def request_query():
    query = request.form['query']
    response = gpt.submit_request(query)
    return {'text': response['choices'][0]['text'][7:]}


title = '¿Que le respondo al profe?'
context = 'El objetivo de esta simulación es encontrar la respuesta perfecta para darle al profe en una situación complicada.'

examples = {
    "Profesor: que pensás hacer respecto a tus trabajos prácticos?":
    "Yo: Trataré de hacer todos los trabajos prácticos antes de que terminen las clases.",
    "Profesor: Alguna duda te quedó respecto de la clase?":
    "Yo: Creo que me gustaría que volviera a explicar el tema pero con menos detalles para tener una idea más clara.",
    "Profesor: Todo listo para el examen final?":
    "Yo:  Si profe, he estado estudiando todos los temas, alguna recomendación sobre qué estudiar en específico?",
    "Profesor: Has estado teniendo algún problema al llevar la clase?":
    "Yo:  En general todo bien, pero he tenido algunos problemas con un par de temas."
}

gpt = GPT(engine="davinci", temperature=0.5, max_tokens=100, context=context)

gpt.add_examples(examples)

if __name__ == "__main__":
    app.run(debug=1)
Example #24
def get_gpt(gpt_key, gpt_engine, gpt_temperature, gpt_max_tokens):
    ''' define a gpt object

    Args:
        gpt_key: key under "Secret" here https://beta.openai.com/developer-quickstart
        gpt_engine: language model identifier (see https://beta.openai.com/api-ref for valid values)
        gpt_temperature: sampling temperature - higher values mean the model will take more risks
        gpt_max_tokens: how many tokens to complete to, up to a maximum of 512

    Returns:
        gpt: newly created gpt object primed with git examples, or None if an error occurs

    '''
    try:
        # create a new gpt object and register the API key
        openai.api_key = gpt_key
        gpt = GPT(engine=gpt_engine,
                  temperature=gpt_temperature,
                  max_tokens=gpt_max_tokens)
        # add examples - potential improvement: read these examples from a file rather than hardcoding them
        gpt.add_example(Example('initialize a git repository', 'git init'))
        gpt.add_example(
            Example('add file foo to the staging area for git', 'git add foo'))
        gpt.add_example(
            Example(
                'add all files in the current directory to the staging area for git',
                'git add .'))
        gpt.add_example(
            Example(
                'record the changes made to the files to a local repository',
                'git commit -m "commit message"'))
        gpt.add_example(
            Example('return the current state of the repository',
                    'git status'))
        gpt.add_example(
            Example(
                'Clone the remote repository https://github.com/ryanmark1867/webview_rasa_example',
                'git clone https://github.com/ryanmark1867/webview_rasa_example'
            ))
        gpt.add_example(
            Example('remove file foo from the staging area', 'git rm -f foo'))
        gpt.add_example(
            Example('show the chronological commit history for a repository',
                    'git log'))
    except Exception as error:
        print('ERROR', error)
    else:
        return gpt
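
A sketch of using the returned object to translate plain English into a git command; the key is a placeholder, and get_top_reply is the gpt-3-sandbox helper used in other examples here.

gpt = get_gpt(gpt_key='sk-...',
              gpt_engine='davinci',
              gpt_temperature=0.5,
              gpt_max_tokens=100)
if gpt is not None:
    print(gpt.get_top_reply('show the diff of staged changes'))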
Example #25
import openai
import random
from os import path

from openai.api_resources.engine import Engine

openai.api_key = ""  # find your own key by joining the waiting list
# at https://beta.openai.com/?demo=1

from gpt import GPT
from gpt import Example

gpt = GPT(engine="davinci", temperature=0.7, max_tokens=258)

# NOTE: Example takes an (input, output) pair in this sandbox; the
# single-argument call below will fail as written.
gpt.add_example(
    Example(
        'This was my space and I chose to write this. Nothing else, but exactly this.'
    ))
Example #26
    def run(self, inputs, verbose=False):
       
        tag = f'vb24@{self.id}:'

        #----------------------------------------------------------------------------
        # Get laser distribution, cathode quantities, and gun current
        #----------------------------------------------------------------------------
        r_params = {'sigma_xy': dunits(str(inputs[f'{tag}laser:sigma_xy'])),
                    'alpha':    dunits(str(inputs[f'{tag}laser:alpha_xy']))}

        count = self.pvdefs[f'{tag}laser:r']['count']
        laser_wavelength = inputs[f'{tag}laser:wavelength']
        laser_power = inputs[f'{tag}laser:power']
        laser_sigma_xy = inputs[f'{tag}laser:sigma_xy']
        laser_alpha_xy = inputs[f'{tag}laser:alpha_xy']
        laser_avg_x = inputs[f'{tag}laser:mean_x']
        laser_avg_y = inputs[f'{tag}laser:mean_y']

        r_dist = SuperGaussianRad(verbose=False, **r_params)
        rs = (r_dist.get_r_pts(count)).to(self.pvdefs[f'{tag}laser:r']['unit'])
        Pr = (dunits(str(laser_power))*r_dist.rho(rs)).to(self.pvdefs[f'{tag}laser:Pr']['unit'])

        cathode_QE = inputs[f'{tag}cathode:QE']
        cathode_MTE = inputs[f'{tag}cathode:MTE']

        hc = 1*units.h*units.c
        photon_flux = (laser_power/(hc/laser_wavelength) ).to_base_units()
        gun_current = (photon_flux*cathode_QE*(1*units.e)).to(self.pvdefs[f'{tag}gun:current']['unit'])
        #----------------------------------------------------------------------------
       

        #----------------------------------------------------------------------------
        # Create Distgen input and run generator
        #----------------------------------------------------------------------------
        distgen_input = yaml.dump(
                        {'n_particle':inputs[f'{tag}gpt:n_particle'].magnitude,
                         'random_type':'hammersley',
                         'total_charge': {'value': 0.0, 'units': 'pC'},   
                         'start': {
                             'type':'cathode',
                             'MTE': {'value': cathode_MTE.magnitude, 'units': str(cathode_MTE.units)}},

                         'r_dist': {
                             'type':'rsg',
                             'sigma_xy':{'value': laser_sigma_xy.magnitude, 'units': str(laser_sigma_xy.units)},
                             'alpha':{'value': laser_alpha_xy.magnitude, 'units': str(laser_alpha_xy.units)},},

                         'transforms':{
                             't1':{'type':'set_avg x', 'avg_x': {'value': laser_avg_x.magnitude, 'units': str(laser_avg_x.units)}},
                             't2':{'type':'set_avg y', 'avg_y': {'value': laser_avg_y.magnitude, 'units': str(laser_avg_y.units)}}
                         }})
 
        gen = Generator(distgen_input, verbose=True)     
        beam = gen.beam()   
        #----------------------------------------------------------------------------


        #----------------------------------------------------------------------------
        # Configure GPT and run
        #----------------------------------------------------------------------------
        G = GPT(input_file=os.path.join(os.getcwd(),'templates/gpt.in'), 
                initial_particles = ParticleGroup(data=beam.data()), 
                use_tempdir=True,
                workdir=os.path.join(os.getcwd(),'tmp'),
                timeout = 5,
                verbose=True)

        settings = {'gun_voltage':   inputs[f'{tag}gun:voltage'].magnitude, 
                    'sol01_current': inputs[f'{tag}sol1:current'].magnitude,
                    'sol02_current': inputs[f'{tag}sol2:current'].magnitude,
                    'npts':          inputs[f'{tag}gpt:n_screen'].magnitude+1}

        result = G.set_variables(settings)
        G.run()
        #----------------------------------------------------------------------------


        #----------------------------------------------------------------------------
        # Load all relevant data into output structure
        #----------------------------------------------------------------------------
        # laser distribution
        output = {f'{tag}laser:r':rs.magnitude, f'{tag}laser:Pr':Pr.magnitude, f'{tag}gun:current':gun_current.magnitude}

        # GPT statistical data
        stats = {'max':['r'], 'mean':['x', 'y', 'z', 'kinetic_energy'], 'sigma':['x','y']}
        for stat, variables in stats.items():
            output = {**output, **{f'{tag}beam:{stat}_{var}': self.gpt_stat_to_pv(G, f'{stat}_{var}', 'screen').magnitude for var in variables}}

        scr_numbers = [1]
        for scr_number in scr_numbers:
            z = inputs[f'{tag}scr{scr_number}:mean_z'].magnitude
            for var in ['x' ,'y']:
                output[f'{tag}scr{scr_number}:mean_{var}']  = np.interp(z, output[f'{tag}beam:mean_z'], output[f'{tag}beam:mean_{var}'])
                output[f'{tag}scr{scr_number}:sigma_{var}'] = np.interp(z, output[f'{tag}beam:mean_z'], output[f'{tag}beam:sigma_{var}'])
                
        # transmission
        output[f'{tag}beam:transmission'] = [100*len(screen['x'])/inputs[f'{tag}gpt:n_particle'].magnitude for screen in G.screen]
    
        # clearance to the beam pipe; the original reused stale loop variables
        # ({stat}_{var}) here, and 'max_r' is presumably the intended statistic
        min_clearance = np.min(inputs[f'{tag}beampipe:radius'] - self.gpt_stat_to_pv(G, 'max_r', 'screen')).to('mm')
        output[f'{tag}beam:radiation'] = output[f'{tag}gun:current']*np.max(output[f'{tag}beam:mean_kinetic_energy'])/min_clearance.magnitude
        #----------------------------------------------------------------------------


        return output