Example #1
def test_merge_models():
    index = util.load_index('casado')
    stmt, best_obj = util.merge_models([index['casado01'], index['casado03']])
    output = StringIO()
    ampl.pretty_print(output, stmt)
    assert output.getvalue() == \
  """var x1 in [0, 20];
var x2 in [-10, 10];
minimize f: ((exp(-3 * x1) - sin(x1) ^ 3) + 1.0) * ((x2 - x2 ^ 2) ^ 2 + (x2 - 1) ^ 2);
"""
    assert best_obj == 0
Example #3
from util import load_conll, eval_result, dev_and_test_comb, gen_data, pad_data, pad_label, pad_word_input, load_index, \
    data_to_seq
from config import config

# np.random.seed(7)

texts, labels = load_conll(config.train_path, config.labels_index)
val_texts, val_labels = load_conll(config.dev_path, config.labels_index)
# texts, labels = load_conll('keras_data/1.txt')
test_texts, test_labels = load_conll(config.test_path, config.labels_index)

# =====================
# build char cnn
# =====================
index_char = load_index(config.char_index)
# print(index_char)

MAX_WORD_LENGTH = config.word_length
wl = MAX_WORD_LENGTH

train_char, sl, wl = gen_data(texts, 0, wl, index_char)
val_char, sl, wl = gen_data(val_texts, sl, wl, index_char)
test_char, sl, wl = gen_data(test_texts, sl, wl, index_char)

MAX_SEQUENCE_LENGTH = sl

# round the maximum sequence length up to an even number
if MAX_SEQUENCE_LENGTH % 2 == 1:
    MAX_SEQUENCE_LENGTH += 1
print(MAX_WORD_LENGTH)
print(MAX_SEQUENCE_LENGTH)
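
The shapes produced above feed a character-level CNN, but the model itself is not part of this snippet. The following is a minimal sketch of such an input branch with hypothetical embedding and filter sizes (dimension 30, kernel 3), not the project's actual architecture:

from keras.layers import Input, Embedding, Conv1D, GlobalMaxPooling1D, TimeDistributed

# (sentences, words, chars) integer tensor -> one feature vector per word
char_input = Input(shape=(MAX_SEQUENCE_LENGTH, MAX_WORD_LENGTH), dtype='int32')
char_emb = TimeDistributed(Embedding(len(index_char) + 1, 30))(char_input)
char_conv = TimeDistributed(Conv1D(30, 3, padding='same', activation='relu'))(char_emb)
char_features = TimeDistributed(GlobalMaxPooling1D())(char_conv)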
Example #4
# Benchmark example

import couenne, lgo
from util import Config, load_index

inputs = load_index('casado', 'hansen')

# Timeout in seconds
timeout = 60

configs = [
    Config('minos'),
    Config('baron'),
    Config('couenne', couenne.options()),
    Config('lgo', {'opmode': lgo.LOCAL_SEARCH_MODE}, suffix='local-search'),
    Config('lgo', {'opmode': lgo.MULTISTART_MODE}, suffix='multistart')
]
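
util.Config is not shown in these snippets. Judging from the call sites, it bundles a solver name, an optional solver-options dict, and a suffix to tell result sets apart; later examples also pass an on_nl_file hook. A hypothetical sketch of that shape (the real class may differ):

from dataclasses import dataclass, field
from typing import Callable, Optional

@dataclass
class Config:
    solver: str                             # AMPL solver name, e.g. 'minos'
    options: dict = field(default_factory=dict)
    suffix: str = ''                        # distinguishes runs of the same solver
    on_nl_file: Optional[Callable] = None   # hook applied to the generated .nl file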
Example #5
# This benchmark is run on a collection of Casado and Hansen problems.

import couenne, lgo
from util import Config, load_index
from common import *

inputs = load_index('casado', 'hansen')

# Timeout in seconds
timeout = 60

configs = [
  Config('minos'),
  Config('baron'),
  Config('couenne', couenne.options()),
  Config('lgo', {'opmode': lgo.LOCAL_SEARCH_MODE}, suffix='local-search'),
  Config('lgo', {'opmode': lgo.MULTISTART_MODE}, suffix='multistart', on_nl_file=lgo.make_maxfct_setter(2))
]
Example #6
File: 3dim.py Project: vitaut/informs2015
# This benchmark is run on a collection of 3-dimensional problems generated
# by combining problems from Casado and Hansen.

import couenne, lgo
from util import Config, get_problem_combinator, load_index
from common import *

inputs = get_problem_combinator(load_index('casado', 'hansen'), 3, 1000)

# Timeout in seconds
timeout = 60

configs = [
  Config('lgo', {'opmode': lgo.LOCAL_SEARCH_MODE}, suffix='local-search'),
  Config('lgo', {'opmode': lgo.MULTISTART_MODE},
         suffix='multistart', on_nl_file=lgo.make_maxfct_setter(2)),
  Config('lgo', {'opmode': lgo.MULTISTART_MODE},
         suffix='multistart-k4', on_nl_file=lgo.make_maxfct_setter(4)),
  Config('minos'),
  Config('baron'),
  Config('couenne', couenne.options())
]
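
get_problem_combinator is also defined elsewhere; the call sites suggest it enumerates n-way combinations of the 1-D problems (n = 3 here) and that the optional third argument caps how many combinations are used. A minimal sketch under those assumptions, returning combination tuples rather than whatever the real helper yields:

import itertools
import random

def get_problem_combinator(index, n, limit=None):
    # all n-way combinations of problem names, optionally sampled down to `limit`
    combos = list(itertools.combinations(sorted(index), n))
    if limit is not None and len(combos) > limit:
        combos = random.sample(combos, limit)
    return combos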
Example #7
from os.path import join
from collections import Counter
import time

from util import preprocess, load_index
from util import VOCAB_FILE
from Doc import doc
from documents import Documents
from output import Results

import pandas as pd
from pandas import DataFrame
import numpy as np

INDEX = load_index()


class Answer_query(object):
    """这个类 将一个查询语句  解析成向量的形式 """
    def __init__(self, query_string, topk=10):
        """args:
            query_string:查询的字符串
            topk:返回的文档的个数

        """
        self.query_string = query_string
        self.query_tokens = preprocess(self.query_string)
        self.topk = topk

    def vectorize(self):
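        # The body of vectorize() is truncated in this snippet. A minimal
        # sketch, assuming it turns the preprocessed query tokens into a
        # simple term-frequency vector (hypothetical; the real method may
        # weight terms against the global INDEX):
        return dict(Counter(self.query_tokens))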
Example #8
                    entry = index[words[1]][i]
                    candidates.append([entry[0], entry[1] / 10000,
                                       entry[2], entry[3], entry[4]])
            for word in words[2:5]:
                if word in index:
                    for i in range(min(5, len(index[word]))):
                        entry = index[word][i]
                        candidates.append([entry[0], entry[1] / 10000,
                                           entry[2], entry[3], entry[4]])
            candidates.sort(key = lambda x: x[1], reverse=True)
            for i in range(min(5,len(candidates))):
                print(candidates[i])

    return


def loop(index):
    while True:
        request = input("> ")
        if not request:
            break
        print(f"quering: {request}")
        query(index, request)


if __name__ == "__main__":
    nltk.download('wordnet')
    index = load_index("dic1_index.zstd")
    loop(index)
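
The index path ends in .zstd, so load_index presumably decompresses the file before unpickling it. A minimal sketch assuming a zstd-compressed pickle and the zstandard package (the script's actual loader is not shown):

import pickle
import zstandard

def load_index(path):
    with open(path, 'rb') as f, zstandard.ZstdDecompressor().stream_reader(f) as reader:
        return pickle.load(reader)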
Example #9
File: 2dim.py Project: vitaut/informs2015
# This benchmark is run on a collection of 2-dimensional problems generated
# by combining problems from Casado and Hansen.

import couenne, lgo
from util import Config, get_problem_combinator, load_index
from common import *

inputs = get_problem_combinator(load_index("casado", "hansen"), 2)

# Timeout in seconds
timeout = 60

configs = [
    Config("lgo", {"opmode": lgo.LOCAL_SEARCH_MODE}, suffix="local-search"),
    Config("lgo", {"opmode": lgo.MULTISTART_MODE}, suffix="multistart", on_nl_file=lgo.make_maxfct_setter(2)),
    Config("lgo", {"opmode": lgo.MULTISTART_MODE}, suffix="multistart-k4", on_nl_file=lgo.make_maxfct_setter(4)),
    Config("minos"),
    Config("baron"),
    Config("couenne", couenne.options()),
]
Example #10
new_vocab_unique['rank'].apply(np.log)
new_vocab_unique['logr'] = new_vocab_unique['rank'].apply(np.log)
new_vocab_unique['log_count'] = new_vocab_unique['count'].apply(np.log)
new_vocab_unique.head()
new_vocab_unique.tail()
new_vocab_unique.to_csv(index=False)
new_vocab_unique.to_csv("DATA/zif.csv",index=False)
new_vocab
new_vocab = new_vocab.drop(columns=['Unnamed: 0'])
new_vocab
from util import load_index
index = load_index("DATA/not_stop/index.pkl")
index.head()
index.sum()
tfs = index.sum(axis=1)
tfs
new_vocab.head()
tfs.values
len(tfs.values)
t = tfs.reindex(new_vocab.index)
t.head()
tfs.head()
t = tfs.reindex(new_vocab['word'])
t.head()
len(t)
len(new_vocab)
t.tail()
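
The log-rank and log-count columns built above are the standard Zipf's-law diagnostic: if count ∝ rank^(-s), the points fall on a line of slope -s. A quick hypothetical follow-up, reusing the columns created in this session:

import numpy as np

slope, intercept = np.polyfit(new_vocab_unique['logr'], new_vocab_unique['log_count'], 1)
print('Zipf exponent ~ {:.2f}'.format(-slope))  # close to 1 for typical text corpora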
Example #11
def test_load_index():
    index = util.load_index('cute')
    assert len(index) == 738
    assert index['cresc100']['best_obj'] == 1e-08
    assert index['cresc100']['path'] == os.path.join('cute', 'cresc100.mod')
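
For reference, the assertions above imply an index shaped roughly like this (an illustration only; the on-disk format is defined in util.py):

import os

index = {
    'cresc100': {
        'best_obj': 1e-08,
        'path': os.path.join('cute', 'cresc100.mod'),
    },
    # ... one entry per model in the 'cute' collection (738 in total)
}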
Example #12
File: small.py Project: vitaut/informs2015
  p2gon
  pgon
  powell
  price
  qb2
  rosenbr
  s324
  s383
  schwefel
  shekel
  steenbre
  tre
  weapon
  ''')

inputs = load_index('nlmodels')
for model in list(inputs.keys()):
  if model not in models:
    del inputs[model]
inputs.update(load_index('jdp'))

# Timeout in seconds
timeout = 60

configs = [
  Config('knitro', {'feastol': 1e-8}),
  Config('lgo', {'opmode': lgo.LOCAL_SEARCH_MODE}, suffix='local-search'),
  Config('lgo', {'opmode': lgo.MULTISTART_MODE}, suffix='multistart',
         on_nl_file=lgo.make_maxfct_setter(2))
]
Example #14
    parser.add_argument(
        '--hits_rel',
        type=int,
        default=5,
        help='the hits here has to be <= the hits in relation prediction retrieval')
    parser.add_argument('--no_heuristics',
                        action='store_false',
                        help='do not use heuristics',
                        dest='heuristics')
    parser.add_argument('--output_dir', type=str, default="./results")
    args = parser.parse_args()
    print(args)

    ent_type = args.ent_type.lower()
    rel_type = args.rel_type.lower()
    # assert (ent_type == "crf" or ent_type == "lstm" or ent_type == "gru")
    # assert (rel_type == "lr" or rel_type == "cnn" or rel_type == "lstm" or rel_type == "gru")
    output_dir = os.path.join(args.output_dir,
                              "{}-{}".format(ent_type, rel_type))
    os.makedirs(output_dir, exist_ok=True)

    index_reach = load_index(args.index_reachpath)
    index_degrees = load_index(args.index_degreespath)
    mid2wiki = get_mid2wiki(args.wiki_path)

    test_answers = answer_rerank(args.data_path, args.ent_path, args.rel_path,
                                 output_dir, index_reach, index_degrees,
                                 mid2wiki, args.heuristics, args.hits_ent,
                                 args.hits_rel)