Example #1
    def __init__(self, model_path, corpus_path, emb_path):
        raw_corpus = myio.read_corpus(corpus_path)
        embedding_layer = myio.create_embedding_layer(
            raw_corpus,
            n_d=10,
            cut_off=1,
            embs=load_embedding_iterator(emb_path)
        )
        weights = myio.create_idf_weights(corpus_path, embedding_layer)
        say("vocab size={}, corpus size={}\n".format(
                embedding_layer.n_V,
                len(raw_corpus)
            ))

        model = Model(args=None, embedding_layer=embedding_layer,
                    weights=weights)

        model_data = model.load_model(model_path)
        model.set_model(model_data)
        model.dropout.set_value(0.0)
        say("model initialized\n")

        score_func = theano.function(
            inputs=[model.idts, model.idbs],
            outputs=model.scores,
            on_unused_input='ignore'
        )
        self.model = model
        self.score_func = score_func
        say("scoring function compiled\n")
Example #2
    def __init__(self, model_path, corpus_path, emb_path):
        raw_corpus = myio.read_corpus(corpus_path)
        embedding_layer = myio.create_embedding_layer(
            raw_corpus,
            n_d=10,
            cut_off=1,
            embs=load_embedding_iterator(emb_path))
        weights = myio.create_idf_weights(corpus_path, embedding_layer)
        say("vocab size={}, corpus size={}\n".format(embedding_layer.n_V,
                                                     len(raw_corpus)))

        model = Model(args=None,
                      embedding_layer=embedding_layer,
                      weights=weights)

        model_data = model.load_model(model_path)
        model.set_model(model_data)
        model.dropout.set_value(0.0)
        say("model initialized\n")

        score_func = theano.function(inputs=[model.idts, model.idbs],
                                     outputs=model.scores,
                                     on_unused_input='ignore')
        self.model = model
        self.score_func = score_func
        say("scoring function compiled\n")
Example #3
def compose(stage):
    setattr(Model, 'train_dataloader', train_dataloader)
    setattr(Model, 'val_dataloader', val_dataloader)
    # setattr(Model, 'test_dataloader', test_dataloader)
    setattr(Model, 'build_loss', build_loss)
    setattr(Model, 'configure_optimizers', configure_optimizers)
    setattr(Model, 'training_step', training_step)
    setattr(Model, 'validation_step', validation_step)

    model = Model(stage)
    model.build_loss()
    return model
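This pattern bolts the Lightning hooks onto Model at runtime. A minimal driver sketch, assuming Model is a pytorch_lightning.LightningModule (the Trainer settings are placeholders):

import pytorch_lightning as pl

model = compose(stage="fit")
trainer = pl.Trainer(max_epochs=10)  # settings assumed for illustration
trainer.fit(model)  # runs the attached training_step/validation_step hooks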
Example #4
    def test_is_cell_alive(self):
        model = Model()
        pattern = [
            [0, 1, 0],
            [0, 0, 0],
            [0, 0, 0],
        ]
        model.init_from_array(pattern)

        _i = list(range(1, model.grid_size + 1))
        _expect = [x == 2 for x in _i]  # only i=2 is True

        for (index, expect_value) in zip(_i, _expect):
            r = model.is_cell_alive(index)
            try:
                self.assertEqual(expect_value, r)
            except AssertionError as e:
                print(f"i = {index}")
                raise e
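For reference, unittest's subTest gives the same per-index failure context without the manual try/except; a drop-in sketch for the loop above:

        for index, expect_value in zip(_i, _expect):
            with self.subTest(i=index):
                self.assertEqual(expect_value, model.is_cell_alive(index))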
Example #5
def objective_fn(**kwargs):
    data_config, nn_config, total_intervals = make_model(**kwargs)

    df = pd.read_csv('data/nk_data.csv')

    model = Model(
        data_config=data_config,
        nn_config=nn_config,
        data=df,
        # intervals=total_intervals
    )

    model.build_nn()

    idx = np.arange(720)
    tr_idx, test_idx = train_test_split(idx, test_size=0.5, random_state=313)

    history = model.train_nn(indices=list(tr_idx))
    return np.min(history['val_loss'])
Example #6
    def calculate(self, alg):
        """
        :param alg:  name of algorithm to use
        """

        if alg == 'erlang':
            try:
                traffic = self.checkEnabled(self.ids.traffic_input)
                lines = self.checkEnabled(self.ids.lines_input)
                block = self.checkEnabled(self.ids.block_rate_input)

                model = Model(traffic, lines, block)
                self.ids.result.text = str(model.calculate_erlang())
            except ValueError:
                self.popup.open()
        elif alg == 'engset':
            try:
                traffic = self.checkEnabled(self.ids.engset_traffic)
                lines = self.checkEnabled(self.ids.engset_lines)
                block = self.checkEnabled(self.ids.engset_block)
                sources = self.checkEnabled(self.ids.sources)

                model = Model(traffic, lines, block, sources)
                self.ids.engset_result.text = str(model.calculate_engset())
            except ValueError:
                self.popup.open()
Example #7
class Tests(unittest.TestCase):
    def setUp(self):
        self.model_p = Model(traffic=3.4, lines=10, blocking_rate=False)
        self.model_n = Model(traffic=3.4, lines=False, blocking_rate=0.0019)
        self.model_a = Model(traffic=False, lines=10, blocking_rate=0.0019)
        self.engset_p = Model(traffic=3.4,
                              lines=10,
                              blocking_rate=False,
                              sources=500)
        self.engset_n = Model(traffic=3.4,
                              lines=False,
                              blocking_rate=0.0018,
                              sources=500)
        self.engset_a = Model(traffic=False,
                              lines=10,
                              blocking_rate=0.0018,
                              sources=500)

    def test_erlang_p(self):
        self.assertEqual(self.model_p.calculate_erlang(), 0.0019)

    def test_erlang_n(self):
        self.assertEqual(self.model_n.calculate_erlang(), 10)

    def test_erlang_a(self):
        self.assertEqual(self.model_a.calculate_erlang(), 3.5)

    def test_engset_p(self):
        self.assertAlmostEqual(self.engset_p.calculate_engset(), 0.0018)

    def test_engset_n(self):
        self.assertEqual(self.engset_n.calculate_engset(), 10)

    def test_engset_a(self):
        self.assertAlmostEqual(self.engset_a.calculate_engset(), 3.4)
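As a cross-check on the expected values in these tests, here is a minimal sketch of the standard Erlang B recursion (the actual calculate_erlang implementation is not shown on this page): with 3.4 erlangs of traffic over 10 lines it yields a blocking probability of about 0.0019, matching test_erlang_p.

def erlang_b(traffic, lines):
    # Erlang B via the stable recursion:
    # B(0) = 1, B(k) = E*B(k-1) / (k + E*B(k-1))
    b = 1.0
    for k in range(1, lines + 1):
        b = traffic * b / (k + traffic * b)
    return b

print(round(erlang_b(3.4, 10), 4))  # -> 0.0019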
Example #8
def objective_func(batch_size,
                   lookback,
                   lr,
                   lstm_units,
                   lstm_act='relu',
                   cnn_act='relu',
                   filters=64):

    n_epochs = 5000

    data_config, nn_config, args, intervals, verbosity = make_model(
        int(batch_size), int(lookback), n_epochs, lr, lstm_act, cnn_act,
        lstm_units, filters)

    model = Model(data_config=data_config,
                  nn_config=nn_config,
                  args=args,
                  intervals=intervals,
                  verbosity=verbosity)

    model.build_nn()
    model.train_nn()

    mse = np.min(model.losses['val_losses']['mse'])

    reset_graph()

    return mse
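objective_func is shaped for a hyperparameter-optimization loop; a hypothetical direct evaluation (the argument values below are arbitrary, chosen only for illustration):

val_mse = objective_func(batch_size=32, lookback=15, lr=1e-3, lstm_units=64)
print(val_mse)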
Example #9
    def test__neighbours_gen(self):
        model = Model()
        pattern = [
            [0, 1, 0],
            [0, 0, 0],
            [0, 0, 0],
        ]
        model.init_from_array(pattern)

        expect = [1, 0, 1, 1, 1, 1, 0, 0, 0]

        for i in range(1, 7 + 1):  # note: only the first 7 of the 9 cells are checked
            _y, _x = model.index_to_row_col(i)
            n = model._neighbours_gen(_y, _x)
            r = sum(n)
            try:
                self.assertEqual(expect[i - 1], r)
            except AssertionError as e:
                print(f"i = {i}")
                print(f"_y, _x = {_y, _x}")
                raise e
Example #10
import torch
import torch.nn as nn

from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision import transforms

from torchvision.models import resnet18, resnet34

import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint


from main import Model


if __name__ == "__main__":
    f = "epoch=9.ckpt"
    model = Model.load_from_checkpoint(f"./checkpoints/{f}")

    model = model.model

    model = nn.Sequential(
        model,
        nn.Softmax(1)
    )

    model.eval()
    example = torch.rand(1, 3, 224, 224)
    traced_script_module = torch.jit.trace(model, example)
    traced_script_module.save("android/model.pt")

    print(model(example).shape)
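A quick round-trip check on the exported TorchScript module, using the public torch.jit.load API (the input shape mirrors the trace example above):

import torch

loaded = torch.jit.load("android/model.pt")
loaded.eval()
with torch.no_grad():
    probs = loaded(torch.rand(1, 3, 224, 224))
print(probs.sum(dim=1))  # softmax outputs should each sum to ~1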
Example #11
        print("ERROR, eval_bleu get empty input or un-match inputs")
        return

    if type(hyps_resp[0]) != list:
        print(
            "ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead"
            .format(type(hyps_resp[0])))
        return

    return corpus_bleu(ref_resp,
                       hyps_resp,
                       smoothing_function=SmoothingFunction().method1)


if __name__ == '__main__':
    model = Model()

    if len(sys.argv) < 3:
        print('Too few args for this script')
        sys.exit(1)

    random_test = sys.argv[1]
    biased_test = sys.argv[2]
    random_test_data = read_dialog(random_test)
    biased_test_data = read_dialog(biased_test)
    random_ref_resp = [[j.split() for j in i['golden_response']]
                       for i in random_test_data]
    biased_ref_resp = [[j.split() for j in i['golden_response']]
                       for i in biased_test_data]

    for i in random_test_data:
        if 'golden_response' in i:
Example #12
                prob_true /= (sum((probs.get(k, 0) for k in vocab)) + eos_probs)
                loss -= math.log(prob_true)
            else:
                loss = float('inf')
            num_tokens += 1
        else:
            num_unk += 1
    probs, eos_probs = model.next_word_probability(context, resp_gt)
    eos_probs /= (sum((probs.get(k, 0) for k in vocab)) + eos_probs)
    loss -= math.log(eos_probs)
    num_tokens += 1
    return loss / num_tokens, math.exp(loss / num_tokens)


if __name__ == '__main__':
    model = Model()
    if len(sys.argv) < 4:
        print('Too few args for this script')
        sys.exit(1)

    vocab_file = sys.argv[1]
    random_test = sys.argv[2]
    biased_test = sys.argv[3]

    with codecs.open(vocab_file, 'r', 'utf-8') as f:
        vocab = set([i.strip() for i in f.readlines() if len(i.strip()) != 0])
    random_test_data = read_dialog(random_test)
    biased_test_data = read_dialog(biased_test)

    random_ppl = 0
    biased_ppl = 0
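The evaluation function in this example returns the pair (mean per-token negative log-likelihood, perplexity), where perplexity is simply exp of the mean NLL. A minimal numeric illustration with made-up values:

import math

token_nlls = [2.1, 1.7, 2.4]  # hypothetical per-token -log p values
avg_nll = sum(token_nlls) / len(token_nlls)
print(avg_nll, math.exp(avg_nll))  # perplexity = exp(mean NLL)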
Example #13
def predict_basin(
    basin: str,
    run_dir: Union[str, Path],
    camels_dir: Union[str, Path],
    period: str = "train",
    epoch: int = 30,
):
    if isinstance(run_dir, str):
        run_dir = Path(run_dir)
    elif not isinstance(run_dir, Path):
        raise TypeError(f"run_dir must be str or Path, not {type(run_dir)}")
    if isinstance(camels_dir, str):
        camels_dir = Path(camels_dir)
    elif not isinstance(camels_dir, Path):
        raise TypeError(f"run_dir must be str or Path, not {type(camels_dir)}")

    with open(run_dir / "cfg.json", "r") as fp:
        run_cfg = json.load(fp)

    if not period in ["train", "val"]:
        raise ValueError("period must be either train or val")
    basins = get_basin_list()
    db_path = str(run_dir / "attributes.db")
    attributes = load_attributes(db_path=db_path,
                                 basins=basins,
                                 drop_lat_lon=True)
    means = attributes.mean()
    stds = attributes.std()
    attrs_count = len(attributes.columns)
    timeseries_count = 6
    input_size_stat = timeseries_count if run_cfg["no_static"] else attrs_count
    input_size_dyn = (timeseries_count if
                      (run_cfg["no_static"] or not run_cfg["concat_static"])
                      else timeseries_count + attrs_count)
    model = Model(
        input_size_dyn=input_size_dyn,
        input_size_stat=input_size_stat,
        hidden_size=run_cfg["hidden_size"],
        dropout=run_cfg["dropout"],
        concat_static=run_cfg["concat_static"],
        no_static=run_cfg["no_static"],
    ).to(DEVICE)

    # load trained model
    weight_file = run_dir / f"model_epoch{epoch}.pt"
    model.load_state_dict(torch.load(weight_file, map_location=DEVICE))

    ds_test = CamelsTXT(
        camels_root=camels_dir,
        basin=basin,
        dates=[
            GLOBAL_SETTINGS[f"{period}_start"],
            GLOBAL_SETTINGS[f"{period}_end"]
        ],
        is_train=False,
        seq_length=run_cfg["seq_length"],
        with_attributes=True,
        attribute_means=means,
        attribute_stds=stds,
        concat_static=run_cfg["concat_static"],
        db_path=db_path,
    )
    date_range = ds_test.dates_index[run_cfg["seq_length"] - 1:]
    loader = DataLoader(ds_test, batch_size=1024, shuffle=False, num_workers=4)
    preds, obs = evaluate_basin(model, loader)
    df = pd.DataFrame(
        data={"qobs": obs.flatten(), "qsim": preds.flatten()},
        index=date_range,
    )

    results = df
    # plt.plot(date_range, results["qobs"], label="Obs")
    # plt.plot(date_range, results["qsim"], label="Preds")
    # plt.legend()
    # plt.savefig(f"{run_dir}/pred_basin_{basin}.pdf")
    # plt.close()
    return results, date_range
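A hypothetical invocation of predict_basin; the run directory, CAMELS root, and basin id below are placeholders (basin ids follow the USGS 8-digit gauge convention):

results, date_range = predict_basin(
    basin="01013500",
    run_dir="runs/run_2501_1030",
    camels_dir="data/CAMELS_US",
    period="val",
    epoch=30,
)
print(results.head())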
Example #14
    # Import parameters for scenario
    scenario_mod = importlib.import_module("params.scenarios.full")
    print("scenarios.params_sensitivity: ", scenario_mod.params_scenario)

    # Import const parameters
    const_file = "default_consts"  # "default_consts"  # "default_consts"  # "single_agent_consts" "alternative_consts"
    consts_mod = importlib.import_module("params.consts." + const_file)
    print("const file", const_file)

    # Seed
    seed = 1
    consts_mod.params_const["gridpoints_x"] = 75
    consts_mod.params_const["gridpoints_y"] = 50

    # === RUN ===
    m_test = Model("Map/", int(seed), consts_mod.params_const,
                   sa_mod.params_sensitivity, scenario_mod.params_scenario)

    # === PLOT ====
    # Plot Map for Farming Productivity Index
    title = "Arability"
    ax = plot_map(m_test.map,
                  m_test.map.arability_c * 100,
                  r"Arability Index $a(c)$",
                  cmap_arability,
                  0.01,
                  100,
                  arability_legend=True)
    ax.set_title("")
    plt.savefig("Map/" + title + ".pdf", bbox_inches="tight", pad_inches=0)

    # Plot Map for Trees.
Example #15
    def setUp(self):
        self.model_p = Model(traffic=3.4, lines=10, blocking_rate=False)
        self.model_n = Model(traffic=3.4, lines=False, blocking_rate=0.0019)
        self.model_a = Model(traffic=False, lines=10, blocking_rate=0.0019)
        self.engset_p = Model(traffic=3.4,
                              lines=10,
                              blocking_rate=False,
                              sources=500)
        self.engset_n = Model(traffic=3.4,
                              lines=False,
                              blocking_rate=0.0018,
                              sources=500)
        self.engset_a = Model(traffic=False,
                              lines=10,
                              blocking_rate=0.0018,
                              sources=500)
Example #16
import time
import vk_api

from main import Model
from parse import parse
from compare import compare
from collections import Counter

list_group_overhear = [80178582]

test = Model(login='******', passw='*')

parser = parse()
compare = compare()

my_group_response = test.get_subscribe(11853247)

# fetches people via the "overheard" group
people_response = test.get_local_people()

# cleans it up
people = parse.parse_response_people_id(people_response)

# parses it
my_groups = parse.parse_group(my_group_response)

print(">>>>---<<<<<")
print(my_groups)
lal = []
for white in range(len(my_groups)):
    lal.extend(test.isMembers(my_groups[white], people))
Example #17
import torch
from flask import Flask, render_template, request

from main import Model, single_predict  # assumed source of these names

app = Flask(__name__)

@app.route("/", methods=['POST','GET'])
def index():
    if (request.method == "POST"):
        data = request.form['content']
        # run()
        
        try:
            numbers = list(map(lambda x: float(x),  data.split(',')))
            output = single_predict(model, numbers)
        except:
            
            output = "You have to write 4 numbers separated by commas"
            
        return render_template('index.html', value = output)
    else:
        return render_template('index.html')

if __name__ == '__main__':
    model = Model()
    model.load_state_dict(torch.load('models/model_v1'))
    model.eval()  # switch to inference mode before serving predictions
    app.run(debug=True)
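A quick smoke test with Flask's built-in test client, assuming model has been loaded as in the __main__ block; the four comma-separated values are arbitrary:

with app.test_client() as client:
    resp = client.post("/", data={"content": "5.1,3.5,1.4,0.2"})
    print(resp.status_code, resp.data[:80])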
Example #18
import cv2
import numpy as np
import tensorflow as tf

from data import loading_data
from main import Model


def image(filename):
    image = cv2.imread(filename, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(gray, (28, 28), interpolation=cv2.INTER_AREA)
    img = img.reshape(1, 28, 28, 1)
    img = img / 255.0

    return img


if __name__ == "__main__":
    model = Model(input_shape=(28, 28, 1), classes=10)
    train_ds, test_ds = loading_data()
    model.compile(
        learning_rate=0.01,
        optimizer='sgd',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
    model.fit(epochs=15, train_ds=train_ds, test_ds=test_ds)
    # filename = 'images/9.jpg'
    # img = image(filename)
    # pred = model.predict(img)
    # final_pred = np.argmax(pred)
    # print(pred)
    # print(final_pred)
    model.save('digit_model.h5')
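A hedged reload sketch: if Model subclasses tf.keras.Model, restoring the .h5 file may require custom_objects or a build-then-load_weights step, so treat this as an assumption rather than a guaranteed path:

reloaded = tf.keras.models.load_model('digit_model.h5')
img = image('images/9.jpg')  # path taken from the commented-out lines above
print(np.argmax(reloaded.predict(img)))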
Example #19
    def generate(self, graph):
        x = []
        y = []
        inputs = []
        for i in range(self.lin):
            y.append([])
        try:
            if graph == 'lines':
                # one traffic value per enabled line-input widget
                line_widgets = [self.ids.line_one_input,
                                self.ids.line_two_input,
                                self.ids.line_three_input,
                                self.ids.line_four_input]
                inputs = [int(w.text) for w in line_widgets[:self.lin]]
                step = float(self.ids.spinner_id.text)
                n_steps = int(floor((float(self.ids.max_traffic.text) -
                                     float(self.ids.min_traffic.text)) / step))
                x.append(float(self.ids.min_traffic.text))
                for i in range(n_steps):
                    x.append(x[i] + step)
                    print(x[i])
                for i in range(self.lin):
                    for j in range(n_steps + 1):
                        model = Model(traffic=x[j], lines=inputs[i], blocking_rate=False)
                        y[i].append(model.calculate_erlang())
                        print(y[i])
            elif graph == 'block':
                # one blocking-rate value per enabled input widget
                block_widgets = [self.ids.block_one_input,
                                 self.ids.block_two_input,
                                 self.ids.block_three_input,
                                 self.ids.block_four_input]
                inputs = [float(w.text) for w in block_widgets[:self.lin]]
                step = int(self.ids.spinner2_id.text)
                n_steps = int(floor((int(self.ids.max_lines.text) -
                                     int(self.ids.min_lines.text)) / step))
                x.append(int(self.ids.min_lines.text))
                print(n_steps)
                print(x)
                for i in range(n_steps):
                    x.append(x[i] + step)
                for i in range(self.lin):
                    for j in range(n_steps + 1):
                        model = Model(traffic=False, lines=x[j],
                                      blocking_rate=inputs[i])
                        y[i].append(model.calculate_erlang())
                        print(y[i])

            generate_graph(x, y, inputs, graph)
        except ValueError:
            # propagate invalid (non-numeric) widget input to the caller
            raise
Example #20
import pytest

from main import cube_vertices
from main import tex_coord
from main import Model
from main import normalize 

m = Model()

# Test the vertices and bounds; why is this useful?
class mainTests():
    position = (10,10,10)
    
    assert normalize(position) == (10,10,10)
    assert normalize(position) != (11,9,10)
    
    position = (1,-1,100)
    
    assert normalize(position) == (1,-1,100)
    assert normalize(position) != (1,1,100)
    
    #test that the cube is defined correctly
        #x,y,z,n
        #x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n,  # top
        #x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n,  # bottom
        #x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n,  # left
        #x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n,  # right
        #x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n,  # front
        #x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n,  # back
        
    assert cube_vertices(1,1,1,1) ==  [0, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0,
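Note that the asserts above sit in a bare class body, so they run once at import time rather than under the test runner; the idiomatic pytest form is plain test functions, e.g.:

def test_normalize_keeps_integer_positions():
    assert normalize((10, 10, 10)) == (10, 10, 10)
    assert normalize((1, -1, 100)) == (1, -1, 100)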
Example #21
import time
import vk_api

from main import Model
from parse import parse
from compare import compare
from collections import Counter


list_group_overhear = [80178582]


test = Model(login="******", passw="*")

parser = parse()
compare = compare()

my_group_response = test.get_subscribe(11853247)

# fetches people via the "overheard" group
people_response = test.get_local_people()

# cleans it up
people = parse.parse_response_people_id(people_response)

# parses it
my_groups = parse.parse_group(my_group_response)

print(">>>>---<<<<<")
print(my_groups)
lal = []
Example #22
    def __len__(self):
        return self.features.shape[0]

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        sample = self.features[idx]
        return sample, sample


if __name__ == "__main__":
    dataset = RepresentationDataset(nuclear_charge=1)
    vae_structure = VAE(dataset.features.shape[-1], layer_size=128, n_layers=2,
                        variant=3, dimensions=2, activation=F.leaky_relu)
    model = Model(dataset=dataset, model=vae_structure, epochs=50,
                  learning_rate=3e-3, batch_size=100, log_interval=100)
    print(3e-3)  # matches the initial learning_rate above
    model.fit()

    model.set_learning_rate(1e-2)
    print(1e-2)
    model.epochs = 30
    model.fit()

    model.set_learning_rate(3e-3)
    print(3e-3)
    model.epochs = 90
    model.fit()

    model.set_learning_rate(1e-3)
    print(1e-3)
Example #23
import re
from collections import defaultdict
from data import Synthesize
from matplotlib import pyplot as plt
from matplotlib.ticker import MultipleLocator
from gurobipy import GRB
from main import Model
from analysis import plot_results
from os import path, makedirs

model = Model('Sensitivity analysis of original model')
# model.AE = 0.95
# model.AZ *= 0.5
# model.AC *= 0.8
# model.AQ *= 2
# model.P *= 1.5
#
# for period in model.D_t:
#     model.D_t[period] *= 1.2
# for source in model.sources:
#     if source in ['coal', 'oil', 'gas']:
#         model.QM_i[source] *= 0.7

if __name__ == '__main__':
    model.optimize()
    folder = 'storage_improves'
    dirname = path.join('sens_analysis_results', folder)
    if not path.exists(dirname):
        plot_dir = path.join(dirname, 'plots')
        gurobi_files_dir = path.join(dirname, 'gurobi_files')
        makedirs(plot_dir)
Example #24
if __name__ == '__main__':
    #key is date
    #  key = sys.argv[1]
    #test_datas = load_predict_data('./rb1710/rb1710(1小时).csv')
    path = './data20190623/test_i9888_60min.csv'
    dates, imgs, labels = load_predict_data(
        './data20190623/test_i9888_60min.csv')
    #imgs = np.expand_dims(test_datas[key], axis=0)
    print(dates.shape, imgs.shape)
    #  pdb.set_trace()

    image = tf.placeholder(tf.float32, [None, 128, 6])
    label = tf.placeholder(tf.float32, [None, 3])
    dropout = tf.placeholder(tf.float32)
    model = Model(image, label, dropout=dropout)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #exit(0)

    glob_step = 'data-00000-of-00001'  #10001
    saver = tf.train.Saver()
    #save_path = 'models/i9888_60min_model/i9888_60min_best_validation.ckpt.{}'.format(glob_step)
    save_path = 'checkpoints/i9888_60min_best_validation.ckpt.{}'.format(
        glob_step)

    with tf.Session(config=config) as sess:
        #sess.run(tf.global_variables_initializer())
        #saver.restore(sess, save_path)
        saver = tf.train.import_meta_graph(
            "./models/i9888_60min_model/i9888_60min_best_validation.ckpt.meta")