def __init__(self,
                 args,
                 train_set=None,
                 val_set=None,
                 idxs_train=None,
                 idxs_val=None):

        self.logger = get_logger("ClientUpdate")
        self.args = args
        self.loss_func = nn.NLLLoss()

        self.lr = 1e-5
        if self.args.lr is not None:
            self.lr = self.args.lr

        #self.logger.debug(f"Learning rate: {self.lr}")

        self.selected_clients = []
        self.train_set = DatasetSplit(train_set, idxs_train)
        #dataset_length = len(self.train_val_set)
        #self.train_set, _ = torch.utils.data.random_split(self.train_val_set,[round(args.train_frac*dataset_length),round((1-args.train_frac)*dataset_length)],generator=torch.Generator().manual_seed(23))
        self.ldr_train = DataLoader(self.train_set,
                                    batch_size=self.args.local_bs,
                                    shuffle=True)

        self.val_set = DatasetSplit(val_set, idxs_val)
        self.ldr_val = DataLoader(self.val_set, batch_size=1, shuffle=True)
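All of these examples call a project-specific get_logger helper imported from a local utils module. For reference only, here is a minimal sketch of such a helper, assuming the standard-library logging module and a single stream handler; the actual helpers in these projects differ in naming, handlers, and formatting:

import logging

def get_logger(name="example", level=logging.DEBUG):
    # Hypothetical helper for illustration; each project ships its own variant.
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid attaching duplicate handlers on repeated calls
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
        logger.addHandler(handler)
    logger.setLevel(level)
    return logger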
Example #2
def parse_city(file_path):
    """Parse input data string from file

    :Parameters:
        -`file_path`: path to txt file
    """
    logger = util.get_logger()
    logger.debug('Reading data from %s' % file_path)
    content = util.read_data_from_file(file_path)
    data_dict = _create_dict_from_txt(content)
    logger.debug('Set information from %s to db' % file_path)
    db_util.set_region_and_city(data_dict)
    logger.debug('Parsing information from file %s completed successfully' % file_path)
Example #3
def parse_district(file_path, city_id):
    """Parse input data string from file

    :Parameters:
        -`file_path`: path to txt file
        -`city_id`: id city in db
    """
    logger = util.get_logger()
    logger.debug('Reading data from %s' % file_path)
    content = util.read_data_from_file(file_path)
    data_dict = _create_dict_from_txt(content)
    logger.debug('Set information from %s to db' % file_path)
    db_util.set_district_and_street(data_dict, city_id)
    logger.debug('Parsing information from file %s completed successfully' % file_path)
Example #4
def train():
    logger = get_logger("./logger")
    writer = SummaryWriter("./temp.tb")


    train_loader, val_loader = None, None
    test_loader = None

    model = None

    criterion = None
    optimizer = get_optimizer(model)
    scheduler = get_lr_scheduler(optimizer)

    trainer = Trainer(criterion, optimizer, scheduler, logger, writer)
    trainer.train_loop(train_loader, val_loader, test_loader, model)
Example #5
def main():
    global args
    args = parser.parse_args()

    workpath = osp.abspath(osp.dirname(__file__))
    with open(osp.join(workpath, args.config)) as f:
        if yaml.__version__ == '5.1':
            config = yaml.load(f, Loader=yaml.FullLoader)
        else:
            config = yaml.load(f)

    for key in config:
        for k, v in config[key].items():
            setattr(args, k, v)

    model_file = 'best.pth'
    model_path = osp.join(osp.dirname(__file__), 'checkpoints', args.trial_log)

    img_size = args.img_size
    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    test_dataset = PASCAL_VOC(
        data_root=args.data_root, 
        img_prefix='VOC2007', 
        ann_file='VOC2007/ImageSets/Main/test.txt',
        transform=data_transform,
        num_debug_imgs=args.num_debug_imgs,
        test_mode=True)

    logger = get_logger(args.trial_log, model_path)
    calc_map(logger, test_dataset, model_path, model_file,
             args.size_grid_cell, args.num_boxes, args.num_classes,
             args.conf_thresh, args.iou_thresh, args.nms_thresh)
Example #6
def main():
    parser = argparse.ArgumentParser(description='Parse data from file.')
    parser.add_argument('-id', '--city_id', dest='city_id', type=int,
                        required=True, help='please write city_id from your db')
    parser.add_argument('-f','--file', dest='filename', type=str,
                        required=True, help='please write path to file')
    parser.add_argument('-v','--verbosity', dest='verbosity', type=int,
                        choices=[0, 1, 2, 3], default=3,
                        help='please choose verbosity level (0, 1, 2, 3)')

    args = parser.parse_args()
    util.change_verbosity(args.verbosity)
    logger = util.get_logger()
    logger.info('Start process...')
    args.filename = os.path.abspath(args.filename)

    if not os.path.exists(args.filename):
        logger.error('File %s doesn\'t exist' % args.filename)
    elif db_util.get_city_by_id(args.city_id) is None:
        logger.error('Object with id = %s doesn\'t exist' % args.city_id)
    else:
        parse_content.parse_district(args.filename, args.city_id)
Example #7
def main():
    parser = argparse.ArgumentParser(description="Parse data from file.")
    parser.add_argument("-f", "--file", dest="filename", type=str, required=True, help="please write path to file")
    parser.add_argument(
        "-v",
        "--verbosity",
        dest="verbosity",
        type=int,
        choices=[0, 1, 2, 3],
        default=3,
        help="please chouse verbosity level (0, 1, 2, 3)",
    )

    args = parser.parse_args()
    util.change_verbosity(args.verbosity)
    logger = util.get_logger()
    logger.info("Start process...")
    args.filename = os.path.abspath(args.filename)

    if not os.path.exists(args.filename):
        logger.error("File %s doesn't exist" % args.filename)
    else:
        parse_content.parse_city(args.filename)
Example #8
    model.eval()

    i = 0
    time_spent = []
    while i < 100:
        start_time = time.time()
        with torch.no_grad():
            _ = model(inputs)
        if i != 0:
            time_spent.append(time.time() - start_time)
        i += 1
    # time.time() differences are in seconds; convert to milliseconds for the printed label
    print('Avg execution time (ms): {:.3f}'.format(np.mean(time_spent) * 1000))


logger = get_logger()
model = LeNet(
)  #DeepFMs.DeepFMs(field_size=23, feature_sizes=[1], logger=logger)

no_non_sparse = 0
for name, param in model.named_parameters():
    no_non_sparse += (param != 0).sum().item()
print(no_non_sparse)
computeTime(model)

# prune 50% of fc1's output units by L2 norm, then make the pruning permanent
prune.ln_structured(model.fc1, name="weight", amount=0.5, n=2, dim=0)
prune.remove(model.fc1, 'weight')

no_non_sparse = 0
for name, param in model.named_parameters():
    no_non_sparse += (param != 0).sum().item()
Example #9
        help='the path of config file for training (default: 64)')
    argparses = parser.parse_args()
    args = Option(argparses.conf_path)
    args.set_save_path()

    # args = parse_args()
    best_val_acc_list = []
    logger = None
    temp = args.outpath
    for i in range(1, args.repeat + 1):
        if args.repeat != 1:
            args.outpath = temp + "_{:02d}".format(i)

        output_process(args.outpath)
        write_settings(args)
        logger = get_logger(args.outpath,
                            'attention_transfer_{:02d}'.format(i))
        if i == 1:
            args.copy_code(logger, dst=os.path.join(args.outpath, 'code'))

        val_acc = train_net(args, logger, seed=(args.seed + i))
        best_val_acc_list.append(val_acc)

    acc_mean = np.mean(best_val_acc_list)
    acc_std = np.std(best_val_acc_list)
    for i in range(len(best_val_acc_list)):
        print_str = 'repeat={}\tbest_val_acc={}'.format(
            i, best_val_acc_list[i])
        logger.info(print_str)
    logger.info('All repeat val_acc_mean={}\tval_acc_std={}'.format(
        acc_mean, acc_std))
Example #10
def rename_keys(d):
    return {n: v for n, (k, v) in enumerate(d.items())}


def weights_init(m):
    if isinstance(m, torch.nn.Conv2d):
        torch.nn.init.xavier_uniform_(m.weight)
        #torch.nn.init.xavier_uniform(m.bias.data)
    elif isinstance(m, torch.nn.Linear):
        #torch.nn.init.xavier_uniform(m.weight)
        m.bias.data.fill_(0.01)


if __name__ == '__main__':

    mylogger = get_logger("fl-moe")

    args = args_parser()

    filename = args.filename
    filexist = os.path.isfile('save/' + filename)
    if (not filexist):
        with open('save/' + filename, 'a') as f1:
            f1.write(
                'dataset;model;epochs;local_ep;num_clients;iid;p;opt;n_data;train_frac;train_gate_only;val_acc_avg_e2e;val_acc_avg_e2e_neighbour;val_acc_avg_locals;val_acc_avg_fedavg;ft_val_acc;val_acc_avg_3;val_acc_avg_rep;val_acc_avg_repft;acc_test_mix;acc_test_locals;acc_test_fedavg;ft_test_acc;ft_train_acc;train_acc_avg_locals;val_acc_gateonly;overlap;run'
            )

            f1.write('\n')

    # TODO: print warnings if arguments are not used (p, overlap)
    for run in range(args.runs):
Example #11
from intercom import Intercom, Conversation
import os
import html2text
from utils.util import get_logger

__author__ = 'Deyang'

intercomm_logger = get_logger('intercomm')

Intercom.app_id = os.environ.get('INTERCOM_APP_ID')
Intercom.app_api_key = os.environ.get('INTERCOM_APP_API_KEY')

ADMIN_DEREK_ID = '426663'
ADMIN_BOT_ID = '426928'

SUBSCRIBED_REPLY_TOPIC = 'conversation.user.replied'
SUBSCRIBED_CREATE_TOPIC = 'conversation.user.created'


def reply_to_user(conversation_id, reply_msg):
    conversation = Conversation.find(id=conversation_id)
    conversation.reply(
        type='admin', id=str(ADMIN_BOT_ID),
        message_type='comment', body=reply_msg)


def parse_notification_and_should_reply(notification):
    intercomm_logger.debug("Raw notification: %s" % notification)
    try:
        conversation_id = notification['data']['item']['id']
        assignee = notification['data']['item']['assignee']
Example #12
import argparse
import json

from crawler_hdu.login import IHDU
from crawler_hdu.service import ElectiveService
from utils.util import get_logger

logger = get_logger(__name__)
CONFIG_FILE = 'config.json'
DEV_FILE = 'test.json'


def check_config(file_path):
    """
    Check the config file; if anything is wrong, print a hint, otherwise return the data needed for each type of course selection.
    :param file_path: path to the config file
    :return:
    """
    elective = None
    sport = None
    pt = None
    config = None
    try:
        with open(file_path, 'r', encoding="utf-8") as f:
            config = json.load(f)
    except Exception:
        logger.error('Config file not found or invalid; please fix the configuration and retry. See the JSON file format for details.', exc_info=1)
        exit(1)
    common = dict()
    # TODO: validate each item and give a hint
    common['username'] = config.get('username')
Example #13
            improved_str = ''
        time_dif = get_time_dif(start_time)
        msg = 'Epoch: {0:>6},Train Loss: {1:>6.6}, Train Ppl: {2:>6.6},' \
              + ' Val loss: {3:>6.6}, Val Ppl: {4:>6.6},Time:{5} {6}'
        print(
            msg.format(epoch + 1, train_loss, train_ppl, valid_loss, valid_ppl,
                       time_dif, improved_str))
        if epoch - last_improved > require_improvement:
            print("No optimization for a long time, auto-stopping...")
            break
    return 1


if __name__ == "__main__":
    if args["pretrain"]:
        logger = get_logger("Pretrain Language Model")
    else:
        logger = get_logger("Sense Generator (Single)")
        logger.info("Definition Vocab Size: %d" % args["vocab_size"])
        logger.info("Use Seed: %s" % args["use_seed"])
        if args["use_input"]:
            logger.info("Use Input: True")
        if args["use_hidden"]:
            logger.info("Use Hidden: True")
        if args["use_gated"]:
            logger.info("Use Gated: True")
        if args["use_input_adaptive"]:
            logger.info("Use Input Adaptive: True")
        if args["use_input_attention"]:
            logger.info("Context Vocab Size: %d" % args["n_attn_tokens"])
            logger.info("Use Attention Type: %s" % args["att_type"])
Example #14
def main():
    global args
    args = parser.parse_args()

    workpath = osp.abspath(osp.dirname(__file__))
    with open(osp.join(workpath, args.config)) as f:
        if yaml.__version__ == '5.1':
            config = yaml.load(f, Loader=yaml.FullLoader)
        else:
            config = yaml.load(f)

    for key in config:
        for k, v in config[key].items():
            setattr(args, k, v)

    # seed settings
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)

    # logger and checkpoint settings
    model_path = osp.join(workpath, 'checkpoints', args.trial_log)
    if not osp.exists(model_path):
        os.makedirs(model_path)
    logger = get_logger(args.trial_log, model_path)
    logger.info(f'args: {args}')

    # model settings
    model = resnet50_yolov1(pretrained=True)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        num_gpus = torch.cuda.device_count()
        logger.debug(f'Use {num_gpus} GPUs!')
        model = nn.DataParallel(model)
        
        # adjust `batch_size` and `burn_in` 
        args.batch_size *= num_gpus
        args.burn_in /= num_gpus
        # args.learning_rate *= num_gpus
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)

    start_epoch = 0
    iter_num = 0
    if args.resume:
        try:
            checkpoint = torch.load(osp.join(model_path, 'latest.tar'))
        except:
            raise FileNotFoundError

        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch']
        iter_num = checkpoint['iter_num']

    # model statistics
    summary(model, input_size=(3, args.img_size, args.img_size), batch_size=args.batch_size)

    # dataset settings
    data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((args.img_size, args.img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
        ])

    # load training dataset
    img_prefixs = args.img_prefix if isinstance(args.img_prefix, list) else [args.img_prefix]
    train_dataset = PASCAL_VOC(
        data_root=args.data_root,
        img_prefix=img_prefixs,
        ann_file=[f'{img_prefix}/ImageSets/Main/trainval.txt' for img_prefix in img_prefixs],
        transform=data_transform,
        size_grid_cell=args.size_grid_cell,
        with_difficult=args.with_difficult,
        do_augmentation=args.do_augmentation)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)

    # load validation/testing dataset
    val_dataset = PASCAL_VOC(
        data_root=args.data_root,
        img_prefix='VOC2007', 
        ann_file='VOC2007/ImageSets/Main/test.txt',
        transform=data_transform,
        size_grid_cell=args.size_grid_cell,
        with_difficult=args.with_difficult,
        test_mode=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)

    logger.info('training dataset: {}'.format(len(train_dataset)))
    logger.info('validation dataset: {}'.format(len(val_dataset)))
    dataloaders = {'train': train_loader, 'val': val_loader}

    # loss function
    criterion = YoloV1Loss(device, args.size_grid_cell, 
        args.num_boxes, args.num_classes, args.lambda_coord, args.lambda_noobj)
    train_model(model, criterion, optimizer, dataloaders, model_path, start_epoch, iter_num, logger, device)
Example #15
import json

from chatterbot import ChatBot
from wit import message
import re
import os
from utils.util import get_logger

__author__ = 'Deyang'

bot_logger = get_logger('bot')
wit_token = 'QUCDCX7MQX4FLYGONBEYLGDHKSTIUFTQ'


# identity_intent = [conversation[0] for conversation in identity_data]
# ask_company_intent = [conversation[0] for conversation in ask_company_data]
# ask_customer_intent = [conversation[0] for conversation in ask_customer_data]
# ask_doc_intent = [conversation[0] for conversation in ask_doc_data]
# ask_price_intent = [conversation[0] for conversation in ask_price_data]
# ask_product_intent = [conversation[0] for conversation in ask_product_data]
# ask_story_intent = [conversation[0] for conversation in ask_story_data]


GREETING_INTENT = 'greetings'
IDENTITY_INTENT = 'identity'
TRY_INTENT = 'try'
EMAIL_INTENT = 'email'
INSULT_AND_SEX_INTENT = 'insult_and_sex'
ASK_PRICE_INTENT = 'ask_price'
ASK_CUSTOMER_INTENT = 'ask_customer'
ASK_TEAM_INTENT = 'ask_team'
Example #16
from sklearn.metrics import accuracy_score

from model import DeepFMs
from utils import data_preprocess
from utils.parameters import get_parser
from utils.util import get_model, load_model_dic, get_logger
from model.Datasets import Dataset, get_dataset

import torch
from torchsummary import summary

if __name__ == '__main__':
    parser = get_parser()
    pars = parser.parse_args()

    logger = get_logger('Quantization')
    logger.info(pars)

    field_size, train_dict, valid_dict, test_dict = get_dataset(pars)

    if not pars.save_model_path:
        logger.info("no model path given: -save_model_path")
        sys.exit()

    model = get_model(field_size=field_size,
                      cuda=pars.use_cuda and torch.cuda.is_available(),
                      feature_sizes=train_dict['feature_sizes'],
                      pars=pars,
                      logger=logger)
    model = load_model_dic(model, pars.save_model_path, sparse=pars.prune)
Example #17
import json

from chatterbot import ChatBot
from wit import message
import re
import os
from utils.util import get_logger

__author__ = 'Deyang'

bot_logger = get_logger('bot')
wit_token = 'QUCDCX7MQX4FLYGONBEYLGDHKSTIUFTQ'

# identity_intent = [conversation[0] for conversation in identity_data]
# ask_company_intent = [conversation[0] for conversation in ask_company_data]
# ask_customer_intent = [conversation[0] for conversation in ask_customer_data]
# ask_doc_intent = [conversation[0] for conversation in ask_doc_data]
# ask_price_intent = [conversation[0] for conversation in ask_price_data]
# ask_product_intent = [conversation[0] for conversation in ask_product_data]
# ask_story_intent = [conversation[0] for conversation in ask_story_data]

GREETING_INTENT = 'greetings'
IDENTITY_INTENT = 'identity'
TRY_INTENT = 'try'
EMAIL_INTENT = 'email'
INSULT_AND_SEX_INTENT = 'insult_and_sex'
ASK_PRICE_INTENT = 'ask_price'
ASK_CUSTOMER_INTENT = 'ask_customer'
ASK_TEAM_INTENT = 'ask_team'
ASK_LAUNCH_INTENT = 'ask_launch'
ASK_BETA_INTENT = 'ask_beta'
Example #18
                        required=True)
    parser.add_argument("--title",
                        type=str,
                        help="experiment title",
                        required=True)
    args = parser.parse_args()

    CONFIG = get_config(args.cfg)

    if CONFIG.cuda:
        device = torch.device("cuda" if (
            torch.cuda.is_available() and CONFIG.ngpu > 0) else "cpu")
    else:
        device = torch.device("cpu")

    get_logger(CONFIG.log_dir)
    writer = get_writer(args.title, CONFIG.write_dir)

    logging.info(
        "=================================== Experiment title : {} Start ==========================="
        .format(args.title))

    set_random_seed(CONFIG.seed)

    train_transform, val_transform, test_transform = get_transforms(CONFIG)
    train_dataset, val_dataset, test_dataset = get_dataset(
        train_transform, val_transform, test_transform, CONFIG)
    train_loader, val_loader, test_loader = get_dataloader(
        train_dataset, val_dataset, test_dataset, CONFIG)

    generator = get_generator(CONFIG, 21 * 8)
Example #19
            improved_str = '*'
        else:
            improved_str = ''
        time_dif = get_time_dif(start_time)
        msg = 'Epoch: {0:>6},Train Loss: {1:>6.6}, Train Ppl: {2:>6.6},' \
              + ' Val loss: {3:>6.6}, Val Ppl: {4:>6.6},Time:{5} {6}'
        print(msg.format(epoch + 1, train_loss, train_ppl, valid_loss, valid_ppl, time_dif, improved_str))
        if epoch - last_improved > require_improvement:
            print("No optimization for a long time, auto-stopping...")
            break
    return 1


if __name__ == "__main__":
    if args["pretrain"]:
        logger = get_logger("Pretrain Language Model")
    else:
        logger = get_logger("Gated Context-Aware Network")
        logger.info("Definition Vocab Size: %d" % args["vocab_size"])
        logger.info("Use Seed: %s" % args["use_seed"])
        if args["use_gated"]:
            logger.info("Use Gated: True")
        if args["use_input_adaptive"]:
            logger.info("Use Input Adaptive: True")
        if args["use_input_attention"] or args["use_context_interaction"]:
            if args["use_input_attention"]:
                logger.info("Use Input Attention: True")
            elif args["use_context_interaction"]:
                logger.info("Use Context Interaction: True")
            logger.info("Context Vocab Size: %d" % args["n_attn_tokens"])
        if args["lm_ckpt"]:
Example #20
    parser.add_argument('--filename',
                        default=[],
                        help='configuration filename',
                        action="append")
    parser.add_argument('--dry-run', action='store_true', help='do not fire')
    parser.add_argument('--experiment',
                        type=str,
                        default='result',
                        help='output path')
    return parser.parse_args()


if __name__ == "__main__":

    args = args_parser()
    mylogger = get_logger("Iterator")

    mylogger.debug(args)
    # Loop over multiple files

    gpus = get_available_gpus()
    number_of_gpus = len(gpus)
    mylogger.debug(f"gpus: {gpus}")

    for filename in args.filename:

        config = read_config(filename)
        flags = config.pop("flags")
        # for clusters in range(1, config["clusters"] + 1  )

        if config["dataset"] == "femnist":
Example #21
from tornado.httpserver import HTTPServer
import tornado.options
from tornado.web import RequestHandler
from copy import deepcopy
import json
import time

from utils.database_ import connect_db, insert_data, user_find, group_find, device_find
from utils.util import get_logger, timestamp2strtime

import multiprocessing
import ws_server as wb_server
from ws_server import data_put

# get the logger object
logger = get_logger("er_log")

############ get collection ##################
# connect to the database and get the target collections
try:
    mydb = connect_db('localhost:27017', 'hy_bitbox')  #, 'event')
    mycol = mydb["event"]
    logger.info("collection:{}".format(mycol))

    usercol = mydb['user_info']
    logger.info("user collection:{}".format(usercol))
    devicecol = mydb['device']
    logger.info("collection:{}".format(devicecol))
except Exception as e:
    logger.error(str(e))
Example #22
        task2_msg = 'Task2: {0:>6},Valid Loss: {1:>6.6}, Valid Ppl: {2:>6.6} {3}'
        print(
            task1_msg.format(args["label_type"][0], valid1_loss, valid1_ppl) +
            "\n")
        print(
            task2_msg.format(args["label_type"][1], valid2_loss, valid2_ppl,
                             improved_str) + "\n")
        if epoch - last_improved > require_improvement:
            print("No optimization for a long time, auto-stopping...")
            break
    return 1


if __name__ == "__main__":
    if args["type"] == "same_level":
        logger = get_logger("Sense Generator (Share Embedding)")
    elif args["type"] == "hir_level":
        logger = get_logger("Sense Generator (Hir Shared)")
    logger.info("Definition Vocab Size: %d" % args["vocab_size"])
    logger.info("Use Seed: %s" % args["use_seed"])
    if args["use_input_attention"]:
        logger.info("Context Vocab Size: %d" % args["n_attn_tokens"])
        logger.info("Use Attention Type: %s" % args["att_type"])
        if args["att_type"] == "ScaledDot":
            logger.info("Gated Scaled Dot Attention : %s" % args["att_gate"])
    if args["lm_ckpt"]:
        logger.info("Use Pretrained Language Model: True")
    logger.info("Use Char Embedding: %s" % args["use_ch"])
    logger.info("Use Elmo Embedding: %s" % args["use_elmo"])
    logger.info("RNN Intial Hidden State: %s" % args["intial_hidden"])
    train()
Example #23
__author__ = 'Deyang'
from utils.util import get_logger

engine_logger = get_logger('ir_query_engine')
Example #24
import utils.util as util
from options import options

import os

from CIFARTrainer import Trainer

if __name__ == '__main__':

    opt = options.parse()
    util.mkdirs(os.path.join(opt.checkpoints_dir, opt.name))
    logger = util.get_logger(
        os.path.join(opt.checkpoints_dir, opt.name, 'logger.log'))

    Trainer(opt, logger).train()
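Unlike most of the examples above, this one passes a log-file path ('logger.log') to util.get_logger instead of a logger name, which suggests the helper attaches a file handler. A hedged sketch of such a variant; the signature and format string are assumptions, not the project's actual code:

import logging

def get_logger(log_path, level=logging.INFO):
    # Hypothetical file-based variant; writes records to the given path.
    logger = logging.getLogger(log_path)
    if not logger.handlers:
        handler = logging.FileHandler(log_path)
        handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
    logger.setLevel(level)
    return logger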
Example #25
    random.seed(pars.random_seed)
    torch.manual_seed(pars.random_seed)
    torch.cuda.manual_seed(pars.random_seed)

    save_model_name = './saved_models/' + pars.c + '_l2_' + str(pars.l2) + '_dt_' + pars.dataset

    if pars.prune:
        save_model_name = save_model_name + '_sparse_' + str(pars.sparse) + '_seed_' + str(pars.random_seed)
    if pars.emb_bag and not pars.qr_emb:
        save_model_name = save_model_name + '_emb_bag'
    if pars.qr_emb:
        save_model_name = save_model_name + '_qr'

    save_model_name = save_model_name + '_' + datetime.now().strftime("%Y%m%d%H%M%S")

    logger = get_logger(save_model_name[14:])
    logger.info(pars)

    logger.info("GET DATASET")
    field_size, train_dict, valid_dict, test_dict = get_dataset(pars)

    model = get_model(field_size=field_size, cuda=pars.use_cuda and torch.cuda.is_available(), feature_sizes=train_dict['feature_sizes'], pars=pars, logger=logger)
    #summary(model, [(train_dict['index'].shape[1], 1), (train_dict['value'].shape[1], )], dtypes=[torch.long, torch.float], device=torch.device("cpu"))

    if pars.use_cuda and torch.cuda.is_available():
        torch.cuda.empty_cache()
        #logger.info(torch.cuda.memory_summary(device=None, abbreviated=False))
        model = model.cuda()

    model.fit(train_dict['index'], train_dict['value'], train_dict['label'], valid_dict['index'],
              valid_dict['value'], valid_dict['label'],
Example #26
def main(params):
    logger = util.get_logger('prepare')
    # preprocess
    preprocess.ChatbotDataPreprocess(data_dir=params['data_dir'],
                                     sent_len=params['sent_len'])
Example #27
from model.Datasets import Dataset, get_dataset

import torch
import warnings

"""
source: https://github.com/peterliht/knowledge-distillation-pytorch
"""
#warnings.filterwarnings("ignore")

if __name__ == '__main__':
    parser = get_parser()
    pars = parser.parse_args()

    logger = get_logger('Knowledge Distillation')
    logger.info(pars)

    if not pars.save_model_path:
        logger.error("no model path given: -save_model_path")
        sys.exit()

    field_size, train_dict, valid_dict, test_dict = get_dataset(pars)

    # teacher
    model = get_model(field_size=field_size, cuda=pars.use_cuda and torch.cuda.is_available(), feature_sizes=train_dict['feature_sizes'], pars=pars, logger=logger)
    model = load_model_dic(model, pars.save_model_path)

    # student
    number_of_deep_nodes = 400
    h_depth = 2