Example #1
def relay():
    """ request.json = [int/float]*310 """
    global action
    state = get_entities(request.json)
    me, opp, good_tower, bad_tower, stats, creeps, allies, enemies = state
    d = 2000
    current_observation = [
        *[abs(me.x - e.x) / d for e in [opp, good_tower, bad_tower] + creeps],
        *[abs(me.y - e.y) / d for e in [opp, good_tower, bad_tower] + creeps],
        *[abs(creep.x - bad_tower.x) / d for creep in allies],
        *[abs(creep.y - bad_tower.y) / d for creep in allies],
        me.hp / me.maxhp,
        me.mana / me.abilities[0].cost,
        opp.hp / opp.maxhp,
        *[creep.hp / me.dmg for creep in creeps]
    ]
    observations, states = zip(*last4states)
    observation = [i for j in observations for i in j]

    last4states.append((current_observation, state))

    if stats.time < 10 or abs(me.xy - good_tower.xy) > d:
        return {'actionCode': 1}

    legal_actions = get_legal_moves(me, opp, allies, enemies, bad_tower, stats)
    observations, states = zip(*last4states)
    observation_ = [i for j in observations for i in j]
    r = reward(*states[-2:])
    agent.memory.append((observation, action, r, observation_))
    action = agent.predict(last4states, legal_actions)
    return {'actionCode': action}
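This handler leans on module-level state (last4states, agent, action) and a reward helper that are not shown, and it assumes the deque already holds at least two earlier frames by the time zip(*last4states) and reward(*states[-2:]) run. A minimal sketch of how that state might be set up; every name and the reward shaping below is an assumption, not the original project's code:

from collections import deque

# Hypothetical module-level setup assumed by relay() above.
last4states = deque(maxlen=4)   # rolling window of (observation, state) pairs
action = 1                      # last action code returned to the game client


def reward(prev_state, cur_state):
    # Placeholder shaping (assumption): damage dealt to the opponent minus damage taken.
    prev_me, prev_opp = prev_state[0], prev_state[1]
    cur_me, cur_opp = cur_state[0], cur_state[1]
    return (prev_opp.hp - cur_opp.hp) - (prev_me.hp - cur_me.hp)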
Example #2
File: main.py Project: deflaux/horsefish
def prepare_and_launch(file_path):
    # get access token and input to headers for requests
    headers = {"Authorization": "bearer " + get_access_token()}

    # get the workflow config
    workflow = get_workspace_config(WORKSPACE_NAMESPACE, WORKSPACE_NAME,
                                    WORKFLOW_NAMESPACE, WORKFLOW_NAME, headers)
    check_fapi_response(workflow, 200)
    workflow_config_json = workflow.json()

    # This workflow uses inputs from the data table as well as the file_path
    # value input to this function. We first pull the root entity type from
    # the workflow config, and then look for sets of that entity type,
    # selecting the first set found in the data table.
    root_entity_type = workflow_config_json['rootEntityType']

    expression = f'this.{root_entity_type}s'
    set_entity_type = f'{root_entity_type}_set'
    entities = get_entities(WORKSPACE_NAMESPACE, WORKSPACE_NAME,
                            set_entity_type, headers)
    check_fapi_response(entities, 200)
    all_set_names = [ent['name'] for ent in entities.json()]
    set_to_use = all_set_names[0]  # use the first set

    # Next we need to add the specific input from file_path. We update this value
    # in the inputs section of the workflow_config_json.
    for input_value in workflow_config_json['inputs']:
        if input_value.endswith(INPUT_NAME):
            workflow_config_json['inputs'][input_value] = f"\"{file_path}\""

    # remove outputs assignment from config
    workflow_config_json['outputs'] = {}

    # update the workflow configuration
    updated_workflow = update_workspace_config(WORKSPACE_NAMESPACE,
                                               WORKSPACE_NAME,
                                               WORKFLOW_NAMESPACE,
                                               WORKFLOW_NAME,
                                               workflow_config_json, headers)
    check_fapi_response(updated_workflow, 200)

    # launch the workflow
    create_submission_response = create_submission(WORKSPACE_NAMESPACE,
                                                   WORKSPACE_NAME,
                                                   WORKFLOW_NAMESPACE,
                                                   WORKFLOW_NAME,
                                                   headers,
                                                   use_callcache=True,
                                                   entity=set_to_use,
                                                   etype=set_entity_type,
                                                   expression=expression)
    check_fapi_response(create_submission_response, 201)

    submission_id = create_submission_response.json()['submissionId']
    print(f"Successfully created submission: submissionId = {submission_id}.")
Example #3
File: ttopic.py Project: iconix/techm
        def __init__(self, name, url):
            normalized_name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')
            if normalized_name == 'All':  # no particular topics
                url_str = url.string.encode('ascii', 'ignore')
            else:
                gn_rss = 'https://www.google.com/news?cf=all&ned=us&hl=en&output=rss&num=100'
                url_str = gn_rss + '&q=' + urllib.quote(normalized_name)

            self._Article = None  # inner class
            self._name = normalized_name
            self._url = url_str
            self._entities = get_entities(self)
Example #4
def main():
    title = sys.argv[1]
    client = HttpClient()
    params = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        'rvprop': 'content',
        'titles': title,
    }
    data = client.get(URL, params)
    if data is not None:
        contents = get_contents(data)
        entities = get_entities(contents)
        pprint(entities)
        print('=' * 72)

        print('\nカテゴリのみを出力')
        for category in filter(lambda x: x.startswith('Category:'), entities):
            print(category)
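get_contents is not shown; with prop=revisions and rvprop=content, the MediaWiki API puts the wikitext under the first revision of each returned page. A minimal sketch assuming the legacy JSON layout (no formatversion=2):

def get_contents(data):
    # Sketch: return the raw wikitext of the first page in the API response.
    pages = data['query']['pages']
    page = next(iter(pages.values()))
    return page['revisions'][0]['*']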
Example #5
    def __load_files(self):
        logging.info("Started loading the files...")
        with open(self.ans_file, "r") as afile:
            self.avals = pkl.load(afile)

        with open(self.desc_file, "r") as dfile:
            self.dvals = pkl.load(dfile)

        with open(self.q_file, "r") as qfile:
            self.qvals = pkl.load(qfile)

        with open(self.vocab, "r") as vocabf:
            self.adict = pkl.load(vocabf)

        self.iadict = {v: k for k, v in self.adict.iteritems()}
        self.data_len = len(self.avals)

        if self.stop == -1:
            self.stop = self.data_len

        self.vsize = len(self.adict)
        self.entities = get_entities(self.adict)
        logging.info("Loaded the files...")
Example #7
    def test_single_entry(self):
        expected = ['エントリ']
        actual = get_entities("括弧でくくられた[[エントリ]]を取り出す")
        self.assertEqual(expected, actual)
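Judging from this test and the related ones in Examples #11, #12, #16, #17 and #20, this project's get_entities pulls out the text between [[ and ]] in MediaWiki markup, returns [] for an empty string, and raises TypeError for None. A minimal regex-based sketch of that behaviour, offered as an assumption rather than the project's actual implementation:

import re


def get_entities(contents):
    # Return every [[...]]-bracketed entry in document order.
    # re.findall raises TypeError when contents is None, matching test_none_param.
    return re.findall(r'\[\[(.+?)\]\]', contents)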
Example #8
def main():
    parser = argparse.ArgumentParser(
        description="""Count comments in a given number of posts""")
    parser.add_argument(
        '-c', '--conf', type=str, metavar='', required=True,
        help='Specify the path of the configuration file')
    args = parser.parse_args()
    config_path = args.conf
    start = time.time()
    logger = get_logger(__name__)
    logger.setLevel(logging.DEBUG)
    conf = load_config(config_path)
    supported_languages = ["it", "en"]
    lang = input("Insert language (it, en): ")
    if lang not in supported_languages:
        logger.error("Please provide a valid language. Supported: 'en', 'it'")
        sys.exit(1)
    else:
        try:
            model = conf.get(lang)
            nlp = spacy.load(model)
        except OSError:
            logger.error("Could not find model in conf file. Please double check")
            sys.exit(0)
    n_posts = check_n_posts()
    if not n_posts.isdigit() and n_posts != "-1":
        logger.error("Please give a number. Exiting")
        sys.exit(0)
    try:
        access_token = conf["access_token"]
        page_id = conf["page_id"]
        n_top_entities = conf["n_top_entities"]
        data_dir_path = os.path.join(page_id, conf["data_dir_name"])
        data_filename = "{}_{}.tsv".format(conf["data_entities_prefix"], str(n_posts))
        plots_dir_path = os.path.join(page_id, conf["plots_dir_name"])
        barplot_filename = "{}_{}posts_ner.png".format(conf["barplot_filename"], str(n_posts))
        barplot_filepath = os.path.join(plots_dir_path, barplot_filename)
    except KeyError:
        logger.error(
            "Invalid configuration file. Please check template and retry")
        sys.exit(0)
    try:
        graph = facebook.GraphAPI(access_token)
        logger.info("Graph API connected")
        profile = graph.get_object(page_id)
    except facebook.GraphAPIError as e:
        logger.error("Could not log in. {}".format(e))
        sys.exit(0)
    if n_posts != "":
        logger.info("Getting the last {} posts".format(n_posts))
    else:
        logger.warning(
            "Requesting posts with no limits. "
            "This could be susceptible of limitations"
            " in the near future due to high rate"
        )
    local_start = time.time()
    posts = graph.get_connections(profile["id"], "posts", limit=n_posts)
    comments = []
    for post in posts["data"]:
        url_post = "https://www.facebook.com/posts/{}".format(post["id"])
        logger.info("Getting data for post {}".format(url_post))
        post_data = get_post_data(access_token, post["id"])
        post_comments = get_comments(post_data)
        if len(post_comments) == 0:
            logger.warning(
                """Apparently, there are no comments at the selected post
                Check the actual post on its Facebook page 
                https://www.facebook.com/posts/{}""".format(post["id"])
            )
        comments.extend(post_comments)
    if len(comments) == 0:
        logger.error("Could not get any comments. Exiting gracefully")
        sys.exit(0)
    elif len(comments) < 100:
        logger.warning(
            "Found {} comment(s). Not enough data "
            "to make much sense. Plots will be made regardless".format(
                len(comments)
            )
        )
    else:
        logger.info("Got {} comments from {} post(s) in {} seconds".format(
            len(comments), len(posts["data"]), round((time.time() - local_start), 1)))
    local_start = time.time()
    entities = []
    for comment in comments:
        ents = get_entities(nlp, comment)
        entities.extend(ents)
    logger.info("Extracted {} entities out of {} comments in {} seconds".format(
        len(entities), len(comments), round((time.time() - local_start), 2)))
    entities_data = count_entities(entities)
    create_nonexistent_dir(data_dir_path)
    data_filepath = os.path.join(data_dir_path, data_filename)
    columns = ["entities", "count"]
    data_to_tsv(entities_data, columns, data_filepath)
    logger.info("Saved {} unique entities and their counts in {} ".format(
        len(entities_data), data_filepath))
    create_nonexistent_dir(plots_dir_path)
    plot_labels = ["Entities", "Counts"]
    save_barplot(entities_data, plot_labels, n_top_entities, barplot_filepath, type_="entities")
    logger.info("Bar plot saved at {}".format(barplot_filepath))
    logger.info("\a\a\aDIN DONE! in {} seconds".format(
        round((time.time() - start), 1)))
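Here get_entities(nlp, comment) runs spaCy NER over a single comment string. A likely minimal form, assuming it simply returns the entity texts (the project's real helper may also filter by entity label):

def get_entities(nlp, text):
    # Run the spaCy pipeline on the text and collect the named-entity strings.
    doc = nlp(text)
    return [ent.text for ent in doc.ents]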
Example #9
import os, json
from utils import get_entities
from collections import OrderedDict


def list2dict(l):
    _dict = {}
    for key in l:
        _dict[key] = [key]
    return _dict


def get_dict(original_dict):
    new_dict = OrderedDict()
    for key, values in original_dict.items():
        new_dict[key] = list2dict(values)
    return new_dict


def dict2json(d, out_file):
    with open(out_file, 'w') as fp:
        json.dump(d, fp)


entity_dict = get_entities('entities.txt')
dd = get_dict(entity_dict)
file_name = 'deepPavlov/DeepPavlov/download/slots/pharma.json'
output_file = os.path.join(os.path.join(os.getcwd(), os.pardir), file_name)
dict2json(dd, output_file)
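The list2dict/get_dict pair turns each entity list into a nested mapping in which every surface form maps to itself, which appears to be the slot-value format the DeepPavlov slot filler consumes. For a hypothetical entities.txt yielding {"drug": ["aspirin", "ibuprofen"]}, the generated pharma.json would contain:

{
    "drug": {
        "aspirin": ["aspirin"],
        "ibuprofen": ["ibuprofen"]
    }
}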
Example #10
def main():
    service = authenticate(sys.argv)
    calendars = get_existing_calendars(service)
    # make_calendars_public(service, calendars)
    entities = get_entities()
    create_calendars_for_entities(entities, calendars, service)
Example #11
def test_single_entry():
    expected = ['エントリ']
    actual = get_entities("括弧でくくられた[[エントリ]]を取り出す")
    assert expected == actual
Example #12
    def test_empty_string(self):
        self.assertEqual([], get_entities(""))
Example #13
    print('Test Acc: {:.3f}% ({}/{})'.format(100 * correct / total, correct,
                                             total))


def simulate_test_dialogs(how_many, print_simulated_dialog=False):
    #model.eval()
    with torch.no_grad():
        for i in range(how_many):
            episode_actions, episode_return, dialog = simulate_dialog(
                system_acts, is_test=True)
            if print_simulated_dialog:
                print(dialog, 'return', episode_return)
        return episode_return


entities = get_entities('dialog-bAbI-tasks/dialog-babi-kb-all.txt')
for idx, (ent_name, ent_vals) in enumerate(entities.items()):
    print('entities', idx, ent_name, ent_vals[0])

assert args.task == 5 or args.task == 6, 'task must be 5 or 6'
if args.task == 5:
    fpath_train = 'dialog-bAbI-tasks/dialog-babi-task5-full-dialogs-trn.txt'
    #fpath_train = 'pretrain_dialogs.txt'
    fpath_test = 'dialog-bAbI-tasks/dialog-babi-task5-full-dialogs-tst-OOV.txt'
elif args.task == 6:  # this is not working yet
    fpath_train = 'dialog-bAbI-tasks/dialog-babi-task6-dstc2-trn.txt'
    fpath_test = 'dialog-bAbI-tasks/dialog-babi-task6-dstc2-tst.txt'

system_acts = [g.SILENT]

vocab = []
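get_entities here reads the dialog-bAbI knowledge base and returns a dict mapping entity types to their values (the loop above prints ent_name and ent_vals[0]). A rough sketch, assuming the usual kb line format "<n> <restaurant> <R_attribute> <value>"; both the file format and the helper body are assumptions:

from collections import defaultdict


def get_entities(kb_path):
    # Group knowledge-base values by attribute, e.g. 'R_cuisine' -> ['british', 'french', ...].
    entities = defaultdict(list)
    with open(kb_path) as f:
        for line in f:
            parts = line.split()
            if len(parts) < 4:
                continue
            attr, value = parts[2], ' '.join(parts[3:])
            if value not in entities[attr]:
                entities[attr].append(value)
    return dict(entities)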
Example #14
def main():
    parser = argparse.ArgumentParser(
        description="""Count comments in a given number of posts""")
    parser.add_argument('-c',
                        '--conf',
                        type=str,
                        metavar='',
                        required=True,
                        help='Specify the path of the configuration file')
    args = parser.parse_args()
    config_path = args.conf
    start = time.time()
    logger = get_logger(__name__)
    logger.setLevel(logging.DEBUG)
    conf = load_config(config_path)
    supported_languages = ["it", "en"]
    lang = input("Insert language (it, en): ")
    if lang not in supported_languages:
        logger.error("Please provide a valid language. Supported: 'en', 'it'")
        sys.exit(1)
    else:
        try:
            model = conf.get(lang)
            nlp = spacy.load(model)
        except OSError:
            logger.error(
                "Could not find model in conf file. Please double check")
            sys.exit(0)
    post_id = ""
    while post_id == "":
        post_id = input("Provide post ID: ")
    try:
        access_token = conf["access_token"]
        page_id = conf["page_id"]
        n_top_entities = conf["n_top_entities"]
        data_dir_path = os.path.join(page_id, conf["data_dir_name"])
        data_filename = "{}_{}{}".format(conf["data_entities_prefix"], post_id,
                                         ".csv")
        plots_dir_path = os.path.join(page_id, conf["plots_dir_name"])
        barplot_filename = "{}_{}{}".format(conf["barplot_filename"], post_id,
                                            "_ner.png")
        barplot_filepath = os.path.join(plots_dir_path, barplot_filename)
    except KeyError:
        logger.error(
            "Invalid configuration file. Please check template and retry")
        sys.exit(0)
    actual_post_id = page_id + "_" + post_id
    url_post = "https://www.facebook.com/posts/{}".format(actual_post_id)
    logger.info("Getting data for post {}".format(url_post))
    local_start = time.time()
    data = get_post_data(access_token, actual_post_id)
    comments = get_comments(data)
    if len(comments) == 0:
        logger.error("""Apparently, there are no comments at the selected post
            Check the actual post on its Facebook page 
            https://www.facebook.com/{}/posts/{}""".format(page_id, post_id))
        sys.exit(0)
    elif len(comments) < 100:
        logger.warning(
            "Got {} comments. Not enough data "
            "to make much sense. Plots will be made regardless".format(
                len(comments)))
    else:
        logger.info("Got {} comments in {} seconds".format(
            len(comments), round((time.time() - local_start), 2)))
    local_start = time.time()
    entities = []
    for comment in comments:
        ents = get_entities(nlp, comment)
        entities.extend(ents)
    logger.info(
        "Extracted {} entities out of {} comments in {} seconds".format(
            len(entities), len(comments), round((time.time() - local_start),
                                                2)))
    entities_data = count_entities(entities)
    create_nonexistent_dir(data_dir_path)
    data_filepath = os.path.join(data_dir_path, data_filename)
    columns = ["entities", "count"]
    data_to_tsv(entities_data, columns, data_filepath)
    logger.info("Saved {} unique entities and their counts in {} ".format(
        len(entities_data), data_filepath))
    create_nonexistent_dir(plots_dir_path)
    plot_labels = ["Entities", "Counts"]
    save_barplot(entities_data, plot_labels, n_top_entities, barplot_filepath)
    logger.info("Bar plot saved at {}".format(barplot_filepath))
    logger.info("\a\a\aDIN DONE! in {} seconds".format(
        round((time.time() - start), 1)))
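count_entities aggregates the extracted entity strings before they are written to disk and plotted. A plausible sketch using collections.Counter (the real helper may sort, lowercase or threshold differently):

from collections import Counter


def count_entities(entities):
    # Count occurrences of each entity string, most frequent first, as (entity, count) rows.
    return Counter(entities).most_common()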
Example #15
import re
from utils import get_pdf_name_list, get_entities, es_search

if __name__ == "__main__":

    pdfs = get_pdf_name_list()
    entities = get_entities()
    filter_list = []
    for k, v in entities.items():
        filter_list = filter_list + v

    must_list = [{
        "match_phrase": {
            "name": "三生国健药业(上海)股份有限公司科创板首次公开发行股票招股说明书(上会稿).pdf"
        }
    }]
    # for k, v in entities.items():
    #     must_list = must_list + [{"match": {"content": f}} for f in v]

    filter_list = [{"term": {"content": f}} for f in filter_list]
    result = es_search(must_list, [], [], [])
    # print(result["hits"])
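es_search takes four clause lists; judging from the commented-out experiments above, these are probably the must / should / must_not / filter sections of a bool query. A minimal sketch under that assumption, using the official elasticsearch client with a hypothetical index name:

from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")


def es_search(must_list, should_list, must_not_list, filter_list, index="pdf_documents"):
    # Assemble a bool query from the four clause lists and execute it.
    body = {
        "query": {
            "bool": {
                "must": must_list,
                "should": should_list,
                "must_not": must_not_list,
                "filter": filter_list,
            }
        }
    }
    return es.search(index=index, body=body)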
Example #16
    def test_none_param(self):
        with self.assertRaises(TypeError):
            get_entities(None)
Example #17
    def test_actual_contents(self):
        actual = get_entities(self.contents)
        self.assertEqual(223, len(actual))
        self.assertEqual('File:Prog_one.png', actual[0])
        self.assertEqual('Category:ソフトウェア開発工程', actual[-1])
Example #18
parser.add_argument('--task',
                    type=int,
                    default=5,
                    help='5 for Task 5 and 6 for Task 6')
parser.add_argument('--seed', type=int, default=1111, help='random seed')
parser.add_argument('--vocab_full',
                    type=int,
                    default=0,
                    help='option to use full embedding matrix')
args = parser.parse_args()

# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)

# entities = get_entities('/Users/graeme/dialog-bAbI-tasks/dialog-babi-kb-all.txt')
entities = get_entities('entities.txt')

# for idx, (ent_name, ent_vals) in enumerate(entities.items()):
#     print('entities', idx, ent_name)

# assert args.task == 5 or args.task == 6, 'task must be 5 or 6'
# if args.task == 5:
fpath_train = 'dialogues_train.txt'
fpath_test = 'dialogues_test.txt'
save_path = 'new_trained_model.pt'
# word2vec_path = 'faqmodel.bin'
word2vec_path = '/Users/mac/Downloads/GoogleNews-vectors-negative300.bin'
database = 'NovartisQA'
# embedding_file = 'faqmodel.pkl'
embedding_file = 'pre_embd_w_full.pickle'
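The config points at the GoogleNews word2vec binary and a pickled embedding matrix. Purely as an illustration of how such a pre_embd pickle might be produced with gensim (the project's actual embedding code is not shown, and vocab is assumed to be a list of words):

import pickle

import numpy as np
from gensim.models import KeyedVectors

w2v = KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
dim = w2v.vector_size
# Rows follow the order of vocab; out-of-vocabulary words get zero vectors.
embedding = np.array([w2v[w] if w in w2v else np.zeros(dim) for w in vocab])

with open(embedding_file, "wb") as f:
    pickle.dump(embedding, f)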
Example #19
def show_form(text):
    data = get_entities(text)
    return render_template("finalform.html", data=data)
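This view hands whatever get_entities extracted from text to the finalform.html template. A sketch of the surrounding Flask wiring, with the route path and form field name as assumptions:

from flask import Flask, render_template, request

app = Flask(__name__)


@app.route('/extract', methods=['POST'])
def extract():
    # Hypothetical route: read the submitted text and reuse show_form to render the result.
    text = request.form.get('text', '')
    return show_form(text)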
    
Example #20
def test_actual_contents(contents):
    actual = get_entities(contents)
    assert 223 == len(actual)
    assert 'File:Prog_one.png' == actual[0]
    assert 'Category:ソフトウェア開発工程' == actual[-1]