Example 1: build a SquareInput model in "light" mode and combine two saved checkpoints on the test set
import importlib

from core.dataset.pickle import make_datasets
from core.features.layers import wrap_extractor, get_default_processing
from core.models.input import SquareInput
from core.utils.preprocess import preprocess_y
# args and apply_options are project helpers not shown in these fragments

# load config
options = args("fc")
config = importlib.import_module("configs." + options.config)
config = apply_options(config, options)

# data featurizers
extractor = config.extractor
featurizer = wrap_extractor(extractor)
preprocess = lambda cluster: (featurizer(cluster), cluster["nparts"], cluster["props"])

# get data
train_examples, dev_set, test_set, test_raw = make_datasets(
    config, preprocess, preprocess)

# data processing
processing = get_default_processing(train_examples, extractor,
                                    preprocess_y(1, 3),
                                    config.preprocessing_mode)

# model
model = SquareInput(config, config.n_eta, config.n_phi, config.n_features)
model.build("light")

path1 = "results/20170318_161623/model.weights/"
path2 = "results/20170318_234936/model.weights/"

model.combine(path1, path2, processing, test_set)
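All of these fragments call `args` and `apply_options` without showing their definitions. A minimal sketch of what such helpers might look like, assuming `args` parses a `--config` flag that defaults to the name passed in and `apply_options` copies the parsed overrides onto the config module; both are assumptions for illustration, not the project's actual code:

import argparse

def args(default_config):
    # Hypothetical helper: parse --config, defaulting to the name the
    # calling script passes in (e.g. "fc" or "baseline").
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default=default_config)
    return parser.parse_args()

def apply_options(config, options):
    # Hypothetical helper: copy command-line overrides onto the config
    # module so downstream code reads everything from one object.
    for key, value in vars(options).items():
        setattr(config, key, value)
    return config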
Example 2: build an EmbeddingsInput model in "light" mode and combine two saved checkpoints on the test set
import importlib

from core.dataset.pickle import make_datasets
# The opening line of this import was truncated in the source; the layer
# classes most plausibly live in core.features.layers alongside
# wrap_extractor and get_default_processing.
from core.features.layers import (wrap_extractor, get_default_processing,
                                  ReLu, Conv2d, MaxPool, Combine, Reduce,
                                  Embedding)
from core.models.input import EmbeddingsInput
from core.utils.preprocess import preprocess_y
# embedding_features, args and apply_options are project helpers not shown
# in these fragments

# load config
options = args("embeddings")
config = importlib.import_module("configs." + options.config)
config = apply_options(config, options)

# data featurizer
featurizer = embedding_features(config.modes)
preprocess = lambda cluster: (featurizer(cluster), cluster["nparts"], cluster["props"])
featurizer_raw = wrap_extractor(config.extractor)
preprocess_raw = lambda cluster: (featurizer_raw(cluster), cluster["nparts"])

# get data
train_examples, dev_set, test_set, test_raw = make_datasets(
    config, preprocess, preprocess_raw)

# data processing
processing = get_default_processing(train_examples, config.n_features,
                                    preprocess_y(1, 3), config.max_n_cells,
                                    config.pad_tok, config.preprocessing_mode)

# model
model = EmbeddingsInput(config)
model.build("light")

path1 = "results/20170318_165602/model.weights/"
path2 = "results/20170318_234736/model.weights/"

model.combine(path1, path2, processing, test_set)
Example 3: build a FlatInput baseline in "light" mode and combine two saved checkpoints on the test set
import importlib

import tensorflow as tf

from core.dataset.pickle import make_datasets
from core.features.layers import wrap_extractor, get_default_processing
from core.models.input import FlatInput
from core.utils.data import get_xy
from core.utils.evaluate import baseline, f1score
from core.utils.preprocess import preprocess_y
# simple_features, args and apply_options are project helpers not shown
# in these fragments

# load config
options = args("baseline")
config = importlib.import_module("configs." + options.config)
config = apply_options(config, options)

# data extraction
featurizer = simple_features(config.tops, config.feature_mode)
preprocess = lambda cluster: (featurizer(cluster), cluster["nparts"], cluster["props"])
featurizer_raw = wrap_extractor(config.extractor)
preprocess_raw = lambda cluster: (featurizer_raw(cluster), cluster["nparts"])

# get data
train_examples, dev_set, test_set, test_raw = make_datasets(
    config, preprocess, preprocess_raw)

# data processing
processing = get_default_processing(train_examples, preprocess_y(1, 3),
                                    config.preprocessing_mode)

# model
model = FlatInput(config, config.input_size)
model.build("light")
path1 = "results/20170318_132042/model.weights/"
path2 = "results/20170318_234539/model.weights/"

model.combine(path1, path2, processing, test_set)
Example 4: train a FlatInput baseline and evaluate it on the test set
import importlib

from core.dataset.pickle import make_datasets
from core.features.layers import wrap_extractor, get_default_processing
from core.models.input import FlatInput
from core.utils.data import get_xy
from core.utils.evaluate import baseline, featurized_export_result
from core.utils.preprocess import preprocess_y
# simple_features, args and apply_options are project helpers not shown
# in these fragments

# load config
options = args("baseline")
config = importlib.import_module("configs." + options.config)
config = apply_options(config, options)

# data extraction
featurizer = simple_features(config.tops, config.feature_mode)
preprocess = lambda cluster: (featurizer(cluster), cluster["nparts"], cluster["props"])
featurizer_raw = wrap_extractor(config.extractor)
preprocess_raw = lambda cluster: (featurizer_raw(cluster), cluster["nparts"])

# get data
train_examples, dev_set, test_set, test_raw = make_datasets(
    config, preprocess, preprocess_raw)

# data processing
processing = get_default_processing(
    train_examples, preprocess_y(config.part_min, config.output_size),
    config.preprocessing_mode)

# model
model = FlatInput(config, config.input_size)
model.build()
model.train(train_examples, dev_set, processing)
acc, base = model.evaluate(test_set, processing, test_raw,
                           featurized_export_result)
Example 5: train an EmbeddingsInput model, evaluate it, and export a clustering of the learned embeddings (this fragment assumes the config has already been loaded as in the other examples)
# data featurizer
featurizer = embedding_features(config.modes)
preprocess = lambda cluster: (featurizer(cluster), cluster["nparts"], cluster["props"])
featurizer_raw = wrap_extractor(config.extractor)
preprocess_raw = lambda cluster: (featurizer_raw(cluster), cluster["nparts"])

# get data
train_examples, dev_set, test_set, test_raw = make_datasets(
    config, preprocess, preprocess_raw)

# data processing
processing = get_default_processing(
    train_examples, config.n_features,
    preprocess_y(config.part_min, config.output_size), config.max_n_cells,
    config.pad_tok, config.preprocessing_mode)

# model
model = EmbeddingsInput(config)
model.build()
model.train(train_examples, dev_set, processing)
acc, base = model.evaluate(test_set, processing)
export_clustering(model,
                  config.embedding_node,
                  test_set,
                  processing,
                  config,
                  default=True,
                  n_components=3)
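`export_clustering` is not defined in these fragments; the `n_components=3` argument suggests it reduces the learned cell embeddings to three components before exporting them, for example for a 3D scatter plot. A generic sketch of that kind of projection with scikit-learn, as an illustration only and not the project's implementation:

import numpy as np
from sklearn.decomposition import PCA

def project_embeddings(embeddings, n_components=3):
    # Reduce an (n_cells, embedding_dim) matrix to n_components columns,
    # the usual first step before exporting points for visualization.
    return PCA(n_components=n_components).fit_transform(np.asarray(embeddings))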
Example 6: the full SquareInput pipeline, from config loading and featurization through training and evaluation
import importlib

from core.dataset.pickle import make_datasets
from core.features.layers import wrap_extractor, get_default_processing
from core.models.input import SquareInput
from core.utils.evaluate import featurized_export_result
from core.utils.preprocess import preprocess_y
# args and apply_options are project helpers not shown in these fragments


# load config
options = args("fc")
config = importlib.import_module("configs." + options.config)
config = apply_options(config, options)

# data featurizers
extractor = config.extractor
featurizer = wrap_extractor(extractor)
preprocess = lambda cluster: (featurizer(cluster), cluster["nparts"], cluster["props"])

# get data
train_examples, dev_set, test_set, test_raw = make_datasets(config, preprocess, preprocess)

# data processing
processing = get_default_processing(
    train_examples, extractor,
    preprocess_y(config.part_min, config.output_size),
    config.preprocessing_mode)

# model
model = SquareInput(config, config.n_eta, config.n_phi, config.n_features)
model.build()
model.train(train_examples, dev_set, processing)
acc, base = model.evaluate(test_set, processing, test_raw, featurized_export_result)

Example 7: size the embedding vocabulary dynamically from the training ids, then train and evaluate an IdInput model (this fragment assumes the imports and config loading shown in the earlier examples)
# get data
train_examples, dev_set, test_set, test_raw = make_datasets(
    config, preprocess, preprocess_raw)
all_ids = set()
for (x, y), i in train_examples:
    all_ids.update(x["ids"])

# dynamically allocate the vocab size
config.n_cells = max(all_ids) + 3
config.unk_tok_id = config.n_cells - 1
config.pad_tok_id = config.n_cells - 2
for layer in config.layers:
    if layer.__class__.__name__ == "Embedding":
        print "Setting vocab_size to {}".format(config.n_cells)
        layer.vocab_size = config.n_cells

# data processing
processing = get_default_processing(train_examples, config.n_features,
                                    preprocess_y(config.output_size),
                                    config.max_n_cells, config.n_cells - 3,
                                    config.pad_tok_id, config.unk_tok_id,
                                    config.pad_tok_feat, "none")

# model
model = IdInput(config)
model.build()
model.train(train_examples, dev_set, processing)
acc, base = model.evaluate(test_set, processing, test_raw,
                           featurized_export_result)
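The `max(all_ids) + 3` arithmetic in Example 7 reserves the two ids just past the largest observed cell id for the pad and unknown tokens. A small worked example of the resulting layout, with illustrative values:

all_ids = {0, 1, 5, 7}         # cell ids observed in the training examples
n_cells = max(all_ids) + 3     # 10: ids 0..7 plus two special tokens
pad_tok_id = n_cells - 2       # 8
unk_tok_id = n_cells - 1       # 9
max_real_id = n_cells - 3      # 7: the n_cells - 3 value passed to
                               # get_default_processing above
assert pad_tok_id not in all_ids and unk_tok_id not in all_ids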