Example 1
def train(args):
    seed_everything(args.seed)

    df = pd.read_csv('data/df_mod.csv')  # for quick debugging, pass nrows=30 to read only the first rows.
    print(args)
    model = get_model(args.arch, classes=args.classes)
    if args.topK > 0:
        assert args.topK < 1, 'args.topK must be a fraction in (0, 1).'
        criterion = TopKLossWithBCE(args.topK)
    else:
        criterion = nn.BCEWithLogitsLoss()
    task = CornellBirdCall(df, model, criterion, metrics=F1(average='micro'), hparams=args)
    #lr_logger = LearningRateLogger()
    trainer = pl.Trainer(
        gpus=[0],
        max_epochs=args.epochs,
        benchmark=True,
        # accumulate_grad_batches=1,
        # log_gpu_memory='all',
        weights_save_path=f'./weights/{args.arch}',
        amp_level='O2',
        precision=16,
        checkpoint_callback=ModelCheckpoint(
            # filepath=os.getcwd(),
            save_top_k=-1,
            verbose=True,
            monitor='val_loss',
            mode='min',
            prefix='',
        ),
        # callbacks=[lr_logger]
    )
    trainer.fit(task)
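The snippet references TopKLossWithBCE without showing it. A minimal sketch, assuming the usual online-hard-example-mining pattern of averaging BCE over only the hardest top-k fraction of elements; the class name and the fractional top_k argument come from the call above, the body is an assumption:

import torch
import torch.nn as nn

class TopKLossWithBCE(nn.Module):
    """BCE-with-logits averaged over the hardest top-k fraction of elements.

    Sketch only: assumes top_k is a fraction in (0, 1), matching the
    assert in train() above.
    """
    def __init__(self, top_k):
        super().__init__()
        self.top_k = top_k
        self.bce = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, logits, targets):
        losses = self.bce(logits, targets).flatten()
        k = max(1, int(self.top_k * losses.numel()))
        # keep only the k largest (hardest) per-element losses
        return torch.topk(losses, k).values.mean()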
Example 2
    def _load_pretrained_model(self):
        base_dir = "/media/scatter/scatterdisk/reply_matching_model/runs/delstm_1024_nsrandom4_lr1e-3/"
        config_dir = base_dir + "config.json"
        best_model_dir = base_dir + "best_loss/best_loss.ckpt"
        model_config = load_config(config_dir)
        model_config.add_echo = False
        preprocessor = DynamicPreprocessor(model_config)
        preprocessor.build_preprocessor()

        infer_config = load_config(config_dir)
        setattr(infer_config, "tokenizer", "SentencePieceTokenizer")
        setattr(
            infer_config, "soynlp_scores",
            "/media/scatter/scatterdisk/tokenizer/soynlp_scores.sol.100M.txt")
        infer_preprocessor = DynamicPreprocessor(infer_config)
        infer_preprocessor.build_preprocessor()
        # Build the inference model in its own graph/session so it stays
        # isolated from anything already in the default graph.
        graph = tf.Graph()
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with graph.as_default():
            Model = get_model(model_config.model)
            data = DataGenerator(preprocessor, model_config)
            infer_model = Model(data, model_config)
            infer_sess = tf.Session(config=tf_config, graph=graph)
            infer_sess.run(tf.global_variables_initializer())
            infer_sess.run(tf.local_variables_initializer())

        infer_model.load(infer_sess, model_dir=best_model_dir)
        self.infer_preprocessor = infer_preprocessor
        return infer_model, infer_sess
Example 3
    def build_graph(self, name="train"):
        graph = tf.Graph()
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with graph.as_default():
            self.logger.info("Building {} graph...".format(name))
            Model = get_model(self.config.model)
            model = Model(self.preprocessor, self.config)
            sess = tf.Session(config=tf_config, graph=graph)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            if (self.config.checkpoint_dir) and (name == "train"):
                self.logger.info('Loading checkpoint from {}'.format(
                    self.config.checkpoint_dir))
                model.load(sess)
                self.global_step = model.global_step_tensor.eval(sess)
        return model, sess
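Both methods use the same TF1 idiom: every model is built inside its own tf.Graph with a dedicated tf.Session, so several models can coexist in one process without sharing variables. A hedged usage sketch (the trainer instance is assumed):

# Isolated graph/session pairs: the train and inference models
# cannot clobber each other's variables.
model, sess = trainer.build_graph(name="train")
infer_model, infer_sess = trainer._load_pretrained_model()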
Example 4
    def get_pretrained_model(self):
        NAME = "delstm_1024_nsrandom4_lr1e-3"
        TOKENIZER = "SentencePieceTokenizer"
        base_dir = "/media/scatter/scatterdisk/reply_matching_model/runs/{}/".format(
            NAME)
        config_dir = base_dir + "config.json"
        best_model_dir = base_dir + "best_loss/best_loss.ckpt"

        model_config = load_config(config_dir)
        model_config.add_echo = False
        graph = tf.Graph()
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True

        with graph.as_default():
            Model = get_model(model_config.model)
            data = DataGenerator(self.preprocessor, model_config)
            infer_model = Model(data, model_config)
            infer_sess = tf.Session(config=tf_config, graph=graph)
            infer_sess.run(tf.global_variables_initializer())
            infer_sess.run(tf.local_variables_initializer())

        infer_model.load(infer_sess, model_dir=best_model_dir)
        return infer_model, infer_sess
Example 5
    infer_config = load_config(config_dir)
    setattr(infer_config, "tokenizer", TOKENIZER)
    setattr(infer_config, "soynlp_scores",
            "/media/scatter/scatterdisk/tokenizer/soynlp_scores.sol.100M.txt")
    infer_preprocessor = DynamicPreprocessor(infer_config)
    infer_preprocessor.build_preprocessor()

    model_config.add_echo = False

    graph = tf.Graph()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with graph.as_default():
        Model = get_model(model_config.model)
        data = DataGenerator(preprocessor, model_config)
        infer_model = Model(data, model_config)
        infer_sess = tf.Session(config=tf_config, graph=graph)
        infer_sess.run(tf.global_variables_initializer())
        infer_sess.run(tf.local_variables_initializer())

        infer_model.load(infer_sess, model_dir=best_model_dir)

    with open("../reply_matching_model/data/reply_set_new.txt", "r") as f:
        reply_set = [line.strip() for line in f if line]
    indexed_reply_set, reply_set_lengths = zip(
        *[infer_preprocessor.preprocess(r) for r in reply_set])

    def get_result(query, reply):
        preprocessed_query, query_length = infer_preprocessor.preprocess(query)
Example 6
#!/usr/bin/env python

import py_path
from utils.arp_list import get_arp_list
from global_settings import settings
from models.session import session
#from models.all_models import Users, InetEther
from models.base import get_model

ARPCache = get_model('arp_cache.ARPCache')

if __name__ == "__main__":
    nlist = []
    for _, ip, mac in get_arp_list('rl0'):
        print("RES:", ip, mac)
        nlist.append(ARPCache(ip=ip, mac=mac))
    # replace the whole cached table with the fresh ARP entries
    session.query(ARPCache).delete()
    for ac in nlist:
        session.add(ac)
    session.commit()
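Examples 6, 7, and 9 resolve model classes from dotted strings like 'arp_cache.ARPCache'. The actual models.base.get_model is not shown; a plausible sketch, assuming it simply resolves 'module.ClassName' within the models package (the package prefix is an assumption):

import importlib

def get_model(dotted_path):
    # Resolve 'module.ClassName' to a class in the models package.
    module_name, class_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module('models.' + module_name)
    return getattr(module, class_name)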
Example 7
import json
import tornado.web
import os
import sys
import logging

from decorators import returnstatus

from models.session import session
from models.base import get_model

InetEther = get_model('inet_ether.InetEther')

from libs.pfctl import PFCtl


class IPAccessHandler(tornado.web.RequestHandler):
    def initialize(self):
        pass

    @returnstatus
    def get(self, ip):
        logging.debug('REQUEST: %s', self.request.body)
        client = session.query(InetEther).filter(InetEther.is_active == True)
        client = client.filter(InetEther.ip == ip).first()

        if client is None:
            return 404

        proxy = None
        if client.access_type in PFCtl.ip_proxy_types:
            proxy = PFCtl.ip_proxy_types[client.access_type]
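The handler returns a bare integer (404) and leaves the response to the returnstatus decorator, which is not shown. A hypothetical sketch, assuming it maps an int return value onto the HTTP status of the response:

import functools

def returnstatus(method):
    # Hypothetical: interpret an int return value as the HTTP status code.
    @functools.wraps(method)
    def wrapper(handler, *args, **kwargs):
        result = method(handler, *args, **kwargs)
        if isinstance(result, int):
            handler.set_status(result)
        return result
    return wrapper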
Example 8
def main():
    config = args.parse_args()
    logger = setup_logger()
    base_dir = "/media/scatter/scatterdisk/reply_matching_model/runs/{}/".format(
        config.name)
    model_config_path = base_dir + "config.json"
    model_path = base_dir + "best_loss/best_loss.ckpt"
    model_config = load_config(model_config_path)
    preprocessor = Preprocessor(model_config)

    graph = tf.Graph()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    logger.info("Loading model : {}".format(config.name))
    with graph.as_default():
        Model = get_model(model_config.model)
        data = Dataset(preprocessor,
                       model_config.train_dir,
                       model_config.val_dir,
                       model_config.min_length,
                       model_config.max_length,
                       model_config.num_negative_samples,
                       model_config.batch_size,
                       model_config.shuffle,
                       model_config.num_epochs,
                       debug=False)
        infer_model = Model(data,
                            model_config,
                            mode=tf.contrib.learn.ModeKeys.EVAL)
        infer_sess = tf.Session(config=tf_config, graph=graph)
        infer_sess.run(tf.global_variables_initializer())
        infer_sess.run(tf.local_variables_initializer())

    infer_model.load(infer_sess, model_dir=model_path)
    graph_path = os.path.join(config.save_dir, config.name)

    logger.info("Writing graph at {}".format(graph_path))
    tf.train.write_graph(infer_sess.graph, graph_path, "graph.pbtxt")
    logger.info("Done")

    maybe_output_node_names = [
        "positive_distances", "queries_encoded", "replies_encoded", "probs",
        "predictions"
    ]
    delete = [
        "increment_global_step_tensor", "increment_cur_epoch_tensor",
        "learning_rate", "labels", "loss", "accuracy",
        "dense_dropout_keep_prob"
    ]

    key2node_name = dict()
    output_node_names = []
    for name, att in vars(infer_model).items():
        if isinstance(att, tf.Tensor):
            key2node_name[name] = "import/" + att.name
            for node_name in maybe_output_node_names:
                if node_name in name:
                    output_node_names.append(att.name)

    for trash in delete:
        if trash in key2node_name:
            del key2node_name[trash]

    mapping_path = os.path.join(config.save_dir, config.name,
                                "key_to_node_name.json")
    with open(mapping_path, "w") as f:
        json.dump(key2node_name, f)
    output_node_names = list(
        set([name.split(":")[0] for name in output_node_names]))

    logger.info("Freezing...")
    freeze_graph.freeze_graph(
        input_graph=os.path.join(graph_path, "graph.pbtxt"),
        input_saver="",
        input_binary=False,
        input_checkpoint=model_path,
        output_node_names=",".join(output_node_names),
        restore_op_name="save/restore_all",
        filename_tensor_name="save/Const:0",
        output_graph=os.path.join(config.save_dir, config.name, "model"),
        clear_devices=True,
        initializer_nodes='string_to_index/hash_table/table_init')
    logger.info("Done.")
Example 9
import cron.py_path
from global_settings import settings
from models.session import session
#from models.all_models import Users, InetEther
from models.base import get_model

Users = get_model('users.Users')

#u = Users(login='******')
#session.add(u)
#session.commit()
#session.query(Users).filter(Users.login=='admin').delete()

print(settings)
print(session)