Example #1
def test(args):

  set_seed(args.seed)

  train_loader, val_loader, test_loader = get_data_loaders(args)

  model = get_model(args)
  criterion = get_criterion(args)

  logger = create_logger('%s/logfile.log' % args.savedir, args)
  logger.info(model)
  model.cuda()

  start_epoch, global_step, n_no_improve, best_metric = 0, 0, 0, -np.inf

  if os.path.exists(os.path.join(args.savedir, "checkpoint.pt")):
    checkpoint = torch.load(os.path.join(args.savedir, "checkpoint.pt"))
    start_epoch = checkpoint["epoch"]
    n_no_improve = checkpoint["n_no_improve"]
    best_metric = checkpoint["best_metric"]
    model.load_state_dict(checkpoint["state_dict"])

  # Test best model
  load_checkpoint(model, os.path.join(args.savedir, "model_best.pt"))
  model.eval()
  test_metrics = model_eval(
    np.inf, test_loader, model, args, criterion, store_preds=True
  )
  log_metrics(f"Test - test", test_metrics, args, logger)
Example #2
def get_train_params(params):
    # Work on a separate copy of params: the model can't be pickled if
    # checkpoint_callback is passed to hparams in the model.
    trainer_params = params.copy()
    model_dir = Path(
        f"../results/linear_probing/resnet_50/layer_{params['layer_name']}")
    model_dir.mkdir(parents=True, exist_ok=True)
    checkpoint_callback = ModelCheckpoint(filepath=model_dir,
                                          save_top_k=10,
                                          verbose=True,
                                          monitor='val_loss',
                                          mode='min')

    trainer_params['checkpoint_callback'] = checkpoint_callback
    if not params['disable_logger']:
        experiment_name = 'finetuning_resnet_' + str(params['layer_name'])
        trainer_params['logger'] = create_logger(experiment_name)
    if params['debug']:
        trainer_params.update({
            'limit_train_batches': 0.01,
            'limit_val_batches': 0.02,
            'limit_test_batches': 0.03
        })

    trainer_params = argparse.Namespace(**trainer_params)
    return trainer_params
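
As a usage sketch (an assumption, not shown in the original example), the returned namespace is typically handed to a pytorch-lightning Trainer; in the older Lightning releases, Trainer.from_argparse_args picks the recognised trainer arguments out of a Namespace and ignores the rest:

# Hypothetical usage; `params` and `model` (a LightningModule) are assumed to exist.
from pytorch_lightning import Trainer

trainer = Trainer.from_argparse_args(get_train_params(params))
trainer.fit(model)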
Example #3
def main():
    args = parse_args()
    configs = Config.fromfile(args.config)
    if args.work_dir is not None:
        configs.work_dir = args.work_dir

    assert osp.exists(args.model), "Model {} does not exist".format(args.model)

    logger = create_logger()
    logger.info(configs.text)

    dataset_config = configs.data
    test_dataset = eval('datasets.{}.build_dataset'.format(
        dataset_config.test.type))(dataset_config, is_training=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=dataset_config.batch,
                             shuffle=True,
                             num_workers=dataset_config.num_workers)

    model = eval('models.{}.build_model'.format(configs.model.name))(configs)
    model = torch.nn.DataParallel(model, device_ids=configs.gpu_group).cuda()
    model.load_state_dict(torch.load(args.model))

    # Test model
    valid_model(configs, model, test_loader)
Example #4
 def __init__(self):
     self.database = EsDatabase()
     self.alphavantage = Alphavantage()
     self.tickers = config.TICKERS
     self.interval = config.INTERVAL
     self.logger = create_logger('Feeder')
Example #5
def get_train_params(params):
    # Work on a separate copy of params: the model can't be pickled if
    # checkpoint_callback is passed to hparams in the model.
    trainer_params = params.copy()
    experiment_type = 'segmentation' if params['segmentation'] else 'detection'
    model_dir = Path(f"../results/models/{params['model_name']}")

    model_dir = model_dir / experiment_type / trainer_params['experiment_name']
    model_dir.mkdir(parents=True, exist_ok=True)
    checkpoint_callback = ModelCheckpoint(
        # filepath=f'{model_save_path}_best_model.pth',
        filepath=model_dir,
        save_top_k=10,
        verbose=True,
        monitor='val_loss',
        mode='min')

    trainer_params['checkpoint_callback'] = checkpoint_callback
    if not params['disable_logger']:
        experiment_name = f"finetuning_{params['model_name']}_{experiment_type}"
        trainer_params['logger'] = create_logger(experiment_name)
    if params['debug']:
        trainer_params.update({
            'limit_train_batches': 0.01,
            'limit_val_batches': 0.02,
            'limit_test_batches': 0.03
        })

    trainer_params = argparse.Namespace(**trainer_params)
    return trainer_params
Example #6
 def __init__(self):
     self.database = EsDatabase()
     self.algorithms = [
         SVRAlgorithm(),
         RandAlgorithm(),
     ]
     self.tickers = config.TICKERS
     self.logger = create_logger('Predictor')
Example #7
def _custom_tf_logger(args):
    # Set custom logger
    log_dir = Path(args.model_dir) / "logs"
    if not log_dir.exists():
        log_dir.mkdir(parents=True, exist_ok=True)

    log_file = log_dir / "{}_{}".format(args.mode, args.tag)
    logging._logger = create_logger(log_file=log_file, with_time=True,
                                    clear_exist_handlers=True, name="tensorflow")
Example #8
def main(options):
    logs_dir = options.logs_dir
    require_empty(logs_dir, recreate=options.overwrite)
    logging_filename = os.path.join(logs_dir, "train.log")
    # train on multiple gpu
    if len(options.gpu) == 0:
        device = torch.device('cpu')
        prefetch_data = False
    elif len(options.gpu) == 1:
        device = torch.device('cuda:{}'.format(options.gpu[0]))
        prefetch_data = True
    else:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(options.gpu)
        device = torch.device('cuda:{}'.format(options.gpu[0]))
        prefetch_data = True
        parallel = True
    logger = create_logger(logging_filename, options.verbose)
    logger.info("Called with parameters: {}".format(options.__dict__))

    #***************#
    if options.net == 'path':
        # val_data = PathDataset(options.val_dir, options.im_w, options.im_h)
        val_data = SingleDataset(options.val_dir, options.im_w, options.im_h)
    else:
        val_data = OverlapDataset(options.val_dir, options.im_w, options.im_h)
    val_loader = DataLoader(val_data,
                            batch_size=options.val_batch_size,
                            shuffle=True,
                            num_workers=4)
    #***************#

    logger.info('Total number of val samples: ~{}'.format(
        len(val_loader) * options.val_batch_size))

    #***************#
    if options.net == 'path':
        model = PathNet(options.repeat_num,
                        options.conv_hidden_num,
                        input_channel=2).to(device)
    else:
        model = PathNet(options.repeat_num,
                        options.conv_hidden_num,
                        last_activation='sigmoid',
                        input_channel=1).to(device)
    cp_dict = {
        "model": model
        # 'optimizer': optimizer
    }
    #***************#
    logger.info_trainable_params(model)
    if options.init_model_filename:
        load_model(options.init_model_filename, cp_dict)
    #***************#
    metrics = [[iou, "iou"]]
    val_loss = validate(val_loader, model, logger, metrics, logs_dir)
Example #9
 def create_user(self, user_exist=False):
     user_name = 'test' if user_exist else input("Please enter your username: ")
     user_path = os.path.join(ROOT_DIR, 'logs', user_name)
     log_path = os.path.join(user_path, 'log')
     if not os.path.exists(user_path):
         os.mkdir(user_path)
         os.mkdir(log_path)
     self.record_savedir = os.path.join(user_path, 'dialogs.txt')
     log_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
     self.log_savedir = os.path.join(log_path, log_name + '.log')
     self.logger = create_logger(self.log_savedir, FLAGS.print)
     return user_name
Example #10
def main():
    """Fill the application described by the command line arguments.
  Example commands:
  'python3 run.py -hd -d' to run in debug mode with headless mode disabled
  'python3 run.py -m 2' to run in graph generation mode
  """
    LOGGER = create_logger('LOGGER')

    args = parse_args()
    is_debug = args.debug
    is_headless = not args.headless
    mode = args.mode

    if mode == 1:
        LOGGER.info('Running Leetcode scraping task...')
        genProblemsJSON(LOGGER, is_headless, is_debug)
    elif mode == 2:
        LOGGER.info('Running Graph generating task...')
        genGraphJSON()
    LOGGER.info('Done')

    return 0
Example #11
def initialization():
    """
    Initializing arguments, logger, tensorboard recorder and json files.
    """
    import argparse
    from torch.utils.tensorboard import SummaryWriter
    from utils import file_operator as f_op
    from utils import logger as log_util
    from utils import seed
    from utils.config import Config
    import environ

    # create config object from arguments
    args = environ.to_config(Config)

    # create logger
    logger_ = log_util.create_logger("main", "./files/output/logs", args.save_key, args.log_level)
    log_util.logger_ = logger_

    # show specified arguments
    logger_.info("*** ARGUMENTS ***")
    logger_.info(args)

    # create TensorBoard writer
    board_root_dir = f"./files/output/board/{args.save_key}"
    f_op.create_folder(board_root_dir)
    log_util.writer_ = SummaryWriter(board_root_dir)

    # load model config
    logger_.info("** LOAD MODEL CONFIG **")
    config = f_op.load_json("./files/input/models/configs", args.model_config_key)
    logger_.info(config)

    # set flag for using seeds
    if args.is_seed:
        seed.feed_seed = True

    return args, config
Example #12
def initialization():
    """
    Initializing arguments, logger, tensorboard recorder and json files.
    """
    from torch.utils.tensorboard import SummaryWriter
    from utils import file_operator as f_op
    from utils import logger as log_util
    import environ
    from config.config_grad_cam import ConfigGradCAM

    # create config object from arguments
    args = environ.to_config(ConfigGradCAM)

    # create logger
    logger_ = log_util.create_logger("main", "./files/output/logs",
                                     args.save_key, args.log_level)
    log_util.logger_ = logger_

    # show specified arguments
    logger_.info("*** ARGUMENTS ***")
    logger_.info(args)

    return args
Example #13
        # %%
        if config.dataset not in dataset2path:
            raise ValueError("dataset {} is not registered.".format(
                config.dataset))
        else:
            train_path = dataset2path[config.dataset]["train"]
            test_path = dataset2path[config.dataset]["test"]
            label_path = dataset2path[config.dataset]["test_label"]
        # %%
        device = torch.device("cuda:1")

        train_logger, file_logger, meta_logger = create_logger(
            dataset=args.dataset,
            h_dim=config.h_dim,
            rolling_size=config.rolling_size,
            train_logger_name='donut_train_logger',
            file_logger_name='donut_file_logger',
            meta_logger_name='donut_meta_logger',
            model_name='DONUT',
            pid=args.pid)

        # logging setting
        file_logger.info('============================')
        for key, value in vars(args).items():
            file_logger.info(key + ' = {}'.format(value))
        file_logger.info('============================')

        meta_logger.info('============================')
        for key, value in vars(args).items():
            meta_logger.info(key + ' = {}'.format(value))
        meta_logger.info('============================')
Example #14
import tensorflow as tf
import numpy as np
import time
from utils.logger import create_logger

logger = create_logger(__name__)


def kinetic_energy(v):
    return 0.5 * tf.reduce_sum(tf.multiply(v, v), axis=1)


def hamiltonian(p, v, f):
    """
    Return the value of the Hamiltonian
    :param p: position variable
    :param v: velocity variable
    :param f: energy function
    :return: hamiltonian
    """
    return f(p) + kinetic_energy(v)


def metropolis_hastings_accept(energy_prev, energy_next):
    """
    Run Metropolis-Hastings algorithm for 1 step
    :param energy_prev:
    :param energy_next:
    :return: Tensor of boolean values, indicating accept or reject
    """
    energy_diff = energy_prev - energy_next
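
A minimal, hypothetical usage sketch of the two functions above in an HMC update step; the tensors p0, p1, v0, v1 and the callable energy_fn are assumptions, not part of the original snippet:

# Hypothetical HMC accept step (TF1-style); illustrative only.
accept = metropolis_hastings_accept(hamiltonian(p0, v0, energy_fn),
                                    hamiltonian(p1, v1, energy_fn))
# Keep the proposed position where accepted, otherwise keep the current one.
p_next = tf.where(accept, p1, p0)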
Example #15
            config.export_config(
                './config/{}/Config_LOF_hdim_{}_rollingsize_{}_pid={}.json'.
                format(config.dataset, config.n_neighbors, 1, config.pid))
        # %%
        if config.dataset not in dataset2path:
            raise ValueError("dataset {} is not registered.".format(
                config.dataset))
        else:
            train_path = dataset2path[config.dataset]["train"]
            test_path = dataset2path[config.dataset]["test"]
            label_path = dataset2path[config.dataset]["test_label"]

        train_logger, file_logger, meta_logger = create_logger(
            dataset=args.dataset,
            train_logger_name='lof_train_logger',
            file_logger_name='lof_file_logger',
            meta_logger_name='lof_meta_logger',
            model_name='LOF',
            pid=args.pid)

        # logging setting
        file_logger.info('============================')
        for key, value in vars(args).items():
            file_logger.info(key + ' = {}'.format(value))
        file_logger.info('============================')

        meta_logger.info('============================')
        for key, value in vars(args).items():
            meta_logger.info(key + ' = {}'.format(value))
        meta_logger.info('============================')
Example #16
                                 server_run=args.server_run, robustness=args.robustness, pid=args.pid, save_results=args.save_results)
        if args.save_config:
            if not os.path.exists('./config/{}/'.format(config.dataset)):
                os.makedirs('./config/{}/'.format(config.dataset))
            config.export_config('./config/{}/Config_OCSVM_pid={}.json'.format(config.dataset, config.pid))
        # %%
        if config.dataset not in dataset2path:
            raise ValueError("dataset {} is not registered.".format(config.dataset))
        else:
            train_path = dataset2path[config.dataset]["train"]
            test_path = dataset2path[config.dataset]["test"]
            label_path = dataset2path[config.dataset]["test_label"]
        # %%
        train_logger, file_logger, meta_logger = create_logger(dataset=args.dataset,
                                                               train_logger_name='ocsvm_train_logger',
                                                               file_logger_name='ocsvm_file_logger',
                                                               meta_logger_name='ocsvm_meta_logger',
                                                               model_name='OCSVM',
                                                               pid=args.pid)

        # logging setting
        file_logger.info('============================')
        for key, value in vars(args).items():
            file_logger.info(key + ' = {}'.format(value))
        file_logger.info('============================')

        meta_logger.info('============================')
        for key, value in vars(args).items():
            meta_logger.info(key + ' = {}'.format(value))
        meta_logger.info('============================')

        s_TN = []
Example #17
            raise ValueError("dataset {} is not registered.".format(
                config.dataset))
        else:
            train_path = dataset2path[config.dataset]["train"]
            test_path = dataset2path[config.dataset]["test"]
            label_path = dataset2path[config.dataset]["test_label"]
        # %%
        gpu_id = args.gpu_id
        device = torch.device("cuda:{}".format(gpu_id))
        # device = torch.device("cpu")
        # device = torch.device(get_free_device())

        train_logger, file_logger, meta_logger = create_logger(
            dataset=args.dataset,
            train_logger_name='rn_train_logger',
            file_logger_name='rn_file_logger',
            meta_logger_name='rn_meta_logger',
            model_name='RN',
            pid=args.pid)

        # logging setting
        file_logger.info('============================')
        for key, value in vars(args).items():
            file_logger.info(key + ' = {}'.format(value))
        file_logger.info('============================')

        meta_logger.info('============================')
        for key, value in vars(args).items():
            meta_logger.info(key + ' = {}'.format(value))
        meta_logger.info('============================')
Example #18
data_aug = True
test_image = None
supervision = 1

iter_mean_grad = 10
max_training_iters = 45000
save_step = 5000

display_step = 10
# learning rate setting
ini_learning_rate = 1e-6
end_learning_rate = 2.5 * 1e-7
batch_size = 1

# log some important info
logger = create_logger(logs_path)
logger.info('The random seed is {}'.format(seed))
logger.info(
    'The pascal base training iteration is {}'.format(pascal_base_iterations))
logger.info(
    'The max objectness training iteration is {}'.format(max_training_iters))
logger.info('The supervision mode is {}'.format(supervision))
logger.info('Data augmentation is {}'.format(data_aug))

# Define Dataset
train_file = 'datasets/davis2016_trainset.txt'
# # small dataset txt file for fast debugging
# train_file = 'datasets/test_algorithm_davis2016_trainset.txt'

dataset = Dataset(train_file,
                  None,
Example #19
def train(args):

    set_seed(args.seed)
    args.savedir = os.path.join(args.savedir, args.name)
    os.makedirs(args.savedir, exist_ok=True)

    train_loader, val_loader, test_loader = get_data_loaders(args)

    model = get_model(args)
    criterion = get_criterion(args)
    optimizer = get_optimizer(model, args)
    scheduler = get_scheduler(optimizer, args)

    logger = create_logger('%s/logfile.log' % args.savedir, args)
    logger.info(model)
    model.cuda()

    torch.save(args, os.path.join(args.savedir, 'args.pt'))

    start_epoch, global_step, n_no_improve, best_metric = 0, 0, 0, -np.inf

    if os.path.exists(os.path.join(args.savedir, "checkpoint.pt")):
        checkpoint = torch.load(os.path.join(args.savedir, "checkpoint.pt"))
        start_epoch = checkpoint["epoch"]
        n_no_improve = checkpoint["n_no_improve"]
        best_metric = checkpoint["best_metric"]
        model.load_state_dict(checkpoint["state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        scheduler.load_state_dict(checkpoint["scheduler"])

    logger.info("Training..")
    for i_epoch in range(start_epoch, args.max_epochs):
        train_losses = []
        model.train()
        optimizer.zero_grad()

        for batch in tqdm(train_loader, total=len(train_loader)):
            loss, _, _ = model_forward(i_epoch, model, args, criterion, batch)

            train_losses.append(loss.item())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        model.eval()
        metrics = model_eval(i_epoch, val_loader, model, args, criterion)
        logger.info("Train Loss: {:.4f}".format(np.mean(train_losses)))
        log_metrics("Val", metrics, args, logger)

        tuning_metric = metrics["acc"]

        scheduler.step(tuning_metric)
        is_improvement = tuning_metric > best_metric
        if is_improvement:
            best_metric = tuning_metric
            n_no_improve = 0
        else:
            n_no_improve += 1

        save_checkpoint(
            {
                "epoch": i_epoch + 1,
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
                "n_no_improve": n_no_improve,
                "best_metric": best_metric,
            },
            is_improvement,
            args.savedir,
        )

        if n_no_improve >= args.patience:
            logger.info("No improvement. Breaking out of loop.")
            break

    # Test best model
    load_checkpoint(model, os.path.join(args.savedir, "model_best.pt"))
    model.eval()
    test_metrics = model_eval(np.inf,
                              test_loader,
                              model,
                              args,
                              criterion,
                              store_preds=True)
    log_metrics(f"Test - test", test_metrics, args, logger)
Example #20
def global_logger(name, propagate=False):
    logger = create_logger(file_=False,
                           console=False,
                           propagate=propagate,
                           name=name)
    return logger
Example #21
def main(options):
    torch.autograd.set_detect_anomaly(False)
    logs_dir = options.logs_dir
    require_empty(logs_dir, recreate=options.overwrite)
    logging_filename = os.path.join(logs_dir, "train.log")
    save_model_filename = os.path.join(logs_dir, "model")
    require_empty(save_model_filename, recreate=options.overwrite)
    tboard_dir = os.path.join(logs_dir, "tboard")
    # train on multiple gpu
    if len(options.gpu) == 0:
        device = torch.device('cpu')
        prefetch_data = False
    elif len(options.gpu) == 1:
        device = torch.device('cuda:{}'.format(options.gpu[0]))
        prefetch_data = True
    else:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(options.gpu)
        device = torch.device('cuda:{}'.format(options.gpu[0]))
        prefetch_data = True
        parallel = True
    logger = create_logger(logging_filename, options.verbose)
    writer = SummaryWriter(log_dir=tboard_dir)
    logger.info("Called with parameters: {}".format(options.__dict__))
    
    #***************#
    if options.net == 'path':
        print(options.augment_gamma)
        train_data = PathDataset(options.train_dir, options.im_w, options.im_h, random_w=options.augment_stroke_width, random_gamma=options.augment_gamma)
        val_data = PathDataset(options.val_dir, options.im_w, options.im_h)
    else:
        train_data = OverlapDataset(options.train_dir, options.im_w, options.im_h, random_w=options.augment_stroke_width)
        val_data = OverlapDataset(options.val_dir, options.im_w, options.im_h)
    train_loader = DataLoader(train_data, batch_size=options.train_batch_size, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_data, batch_size=options.val_batch_size, shuffle=True, num_workers=4)
    #***************#

    logger.info('Total number of train samples: ~{}'.format(len(train_loader) * options.train_batch_size))
    logger.info('Total number of val samples: ~{}'.format(len(val_loader) * options.val_batch_size))

    #***************#
    if options.net == 'path':
        model = PathNet(options.repeat_num, options.conv_hidden_num, input_channel=2).to(device)
        # model = PathNet(options.repeat_num, options.conv_hidden_num, last_activation='sigmoid', input_channel=2).to(device)
    else:
        model = PathNet(options.repeat_num, options.conv_hidden_num, last_activation='sigmoid', input_channel=1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=options.lr, weight_decay=options.weight_decay, betas=[0.5, 0.999])
    # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 150, gamma=0.1)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[150,180], gamma=0.1)

    cp_dict = {"model": model
                # 'optimizer': optimizer
                }
    #***************#
    logger.info_trainable_params(model)
    if options.init_model_filename:
        load_model(options.init_model_filename, cp_dict)
    #***************#
    if options.loss == 'l1':
        criterion = nn.L1Loss(reduction='mean')
    elif options.loss == 'bce':
        criterion = nn.BCELoss(reduction='mean')
    else:
        criterion = nn.MSELoss(reduction='mean')
    
    #***************#
    best_val_loss = 100000000
    best_val_iou = 0.0
    metrics = [[criterion, options.loss], [iou, "iou"]]
    for epoch_i in range(options.epochs):
        logger.info('Training epoch {}'.format(epoch_i))
        epoch_loss = 0
        start_time = time()
        model.train()
        for j, data in enumerate(train_loader):
            model.zero_grad()
            img, trg, _ = data
            img = img.to(device)
            trg = trg.to(device)
            # global discriminator
            pred = model(img)
            # pred = torch.clamp(pred, 0, 1)
            loss = criterion(pred, trg)
            loss.backward()
            epoch_loss += loss.item()
            torch.nn.utils.clip_grad_norm_(model.parameters(), options.clip)
            optimizer.step()
            lr_scheduler.step()

        logger.info(
            'loss : {loss_G:.4f}'.format(loss_G=epoch_loss/len(train_loader)))
        writer.add_scalar("train_loss", epoch_loss / len(train_loader), global_step=epoch_i)
        logger.info('Time  {}'.format(time() - start_time))
        img_grid = _make_gird(img, pred, trg)
        writer.add_image('train_output', img_grid, global_step=epoch_i)
        
        val_iou, val_loss = validate(val_loader, model, logger, epoch_i, metrics, writer)
        logger.info(
            'val loss : {loss_G:.4f}'.format(loss_G=val_loss))
        logger.info(
            'val iou : {loss_G:.4f}'.format(loss_G=val_iou))
        
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            save_model(os.path.join(save_model_filename, 'best_loss.pth'), cp_dict)
        if val_iou > best_val_iou:
            best_val_iou = val_iou
            save_model(os.path.join(save_model_filename, 'best_iou.pth'), cp_dict)
                
        if (epoch_i % options.batches_before_save) == 0:
            save_model(os.path.join(save_model_filename, f'epoch_{epoch_i}.pth'), cp_dict)
Example #22
                                                                                       config.rolling_size, config.pid))
        # %%
        if config.dataset not in dataset2path:
            raise ValueError("dataset {} is not registered.".format(config.dataset))
        else:
            train_path = dataset2path[config.dataset]["train"]
            test_path = dataset2path[config.dataset]["test"]
            label_path = dataset2path[config.dataset]["test_label"]
        # %%
        #device = torch.device(get_free_device())
        device = torch.device("cuda:1")

        train_logger, file_logger, meta_logger = create_logger(dataset=args.dataset,
                                                               h_dim=args.h_dim,
                                                               rolling_size=args.rolling_size,
                                                               train_logger_name='lstmndt_train_logger',
                                                               file_logger_name='lstmndt_file_logger',
                                                               meta_logger_name='lstmndt_meta_logger',
                                                               model_name='LSTMNDT',
                                                               pid=args.pid)

        # logging setting
        file_logger.info('============================')
        for key, value in vars(args).items():
            file_logger.info(key + ' = {}'.format(value))
        file_logger.info('============================')

        meta_logger.info('============================')
        for key, value in vars(args).items():
            meta_logger.info(key + ' = {}'.format(value))
        meta_logger.info('============================')
Example #23
    return args


if __name__ == '__main__':
    args = parse_args()

    if args.cfg_file:
        update_cfg(args.cfg_file)

    logdir = osp.join(cfg.SRC_DIR, cfg.LOG_DIR)
    if not osp.exists(logdir):
        os.makedirs(logdir)
    logfile = osp.join(
        logdir, "test_%s_%s_iter_%d" % (cfg.TAG, cfg.PREFIX, cfg.TEST.ITER))
    logger = create_logger(log_file=logfile,
                           withtime=True,
                           propagate=False,
                           name=cfg.LOGGER)

    if cfg.TEST.SAVE_MODEL:
        cfg.TRAIN.BS = cfg.TEST.BS_2D

    logger.info("Configuration: ")
    for handler in logger.handlers:
        pprint(cfg, handler.stream)

    model_path = osp.join(cfg.SRC_DIR, cfg.OUTPUT_DIR, cfg.TAG)
    if args.best.lower() not in ["false", "0"]:
        model_file = osp.join(model_path, "{:s}_best.ckpt".format(cfg.PREFIX))
    else:
        model_file = osp.join(
            model_path, "{:s}_iter_{:d}.ckpt".format(cfg.PREFIX,
Example #24
from utils import logger
from cassandra.cluster import Cluster
import pandas as pd

log = logger.create_logger(__name__)


CNT_QUERY_CLIENTS = "SELECT prodID, consID, topic, count(*) as cnt " \
            "FROM CNT WHERE prodid = %s AND consid = %s " \
            "GROUP BY id, prodID, consID, topic ALLOW FILTERING"

CASS_CONTACT_POINTS = ["127.0.0.1"]
CASS_KEYSPACE = "brokertracker"


def connect():
    try:
        cluster = Cluster(CASS_CONTACT_POINTS)
        session = cluster.connect(CASS_KEYSPACE)
        log.info("Connected to Cassandra.")
        return session
    except Exception:
        log.error("Error connecting to Cassandra.")


def getJoinCntDF(session, query, params):
    l = []
    rows = session.execute(query=query,
                           parameters=(params[0], params[1]),
                           trace=True)
    print(rows.get_query_trace())
Example #25
def reset_options(options, args, phase='train'):
    if hasattr(args, "batch_size") and args.batch_size:
        options.train.batch_size = options.test.batch_size = args.batch_size
    if hasattr(args, "version") and args.version:
        options.version = args.version
    if hasattr(args, "num_epochs") and args.num_epochs:
        options.train.num_epochs = args.num_epochs
    if hasattr(args, "checkpoint") and args.checkpoint:
        options.checkpoint = args.checkpoint
    if hasattr(args, "folder") and args.folder:
        options.dataset.predict.folder = args.folder
    if hasattr(args, "gpus") and args.gpus:
        options.num_gpus = args.gpus
    if hasattr(args, "shuffle") and args.shuffle:
        options.train.shuffle = options.test.shuffle = True
    if hasattr(args, "name") and args.name:
        options.name = args.name
    cwd = os.getcwd()

    if options.version is None:
        prefix = ""
        if args.options:
            prefix = slugify(args.options) + "_"
        options.version = prefix + datetime.now().strftime(
            '%m%d%H%M%S')  # ignore %Y
    options.log_dir = os.path.join(cwd, options.experiments_dir,
                                   options.log_dir, options.name)
    print('=> creating {}'.format(options.log_dir))
    os.makedirs(options.log_dir, exist_ok=True)

    options.checkpoint_dir = os.path.join(cwd, options.experiments_dir,
                                          options.checkpoint_dir, options.name,
                                          options.version)
    if phase != 'predict':
        print('=> creating {}'.format(options.checkpoint_dir))
        os.makedirs(options.checkpoint_dir, exist_ok=True)

    options.summary_dir = os.path.join(cwd, options.experiments_dir,
                                       options.summary_dir, options.name,
                                       options.version)
    if phase != 'predict':
        print('=> creating {}'.format(options.summary_dir))
        os.makedirs(options.summary_dir, exist_ok=True)

    if phase == 'predict':
        print('=> do not create summary writer for predict')
        writer = None
    else:
        print('=> creating summary writer')
        writer = SummaryWriter(options.summary_dir)

    options.predict_dir = os.path.join(cwd, options.experiments_dir,
                                       options.predict_dir, options.name,
                                       options.version)
    if phase == 'predict':
        print('=> creating {}'.format(options.predict_dir))
        os.makedirs(options.predict_dir, exist_ok=True)

    logger = create_logger(options, phase=phase)
    options_text = pprint.pformat(vars(options))
    logger.info(options_text)

    return logger, writer
Example #26
    def __init__(self,
                 network,
                 energy_fn,
                 discriminator,
                 noise_sampler,
                 b,
                 m,
                 eta=1.0,
                 scale=10.0):
        self.energy_fn = energy_fn
        self.logger = create_logger(__name__)
        self.train_op = TrainingOperator(network)
        self.infer_op = InferenceOperator(network, energy_fn)
        self.b = tf.to_int32(tf.reshape(tf.multinomial(tf.ones([1, b]), 1),
                                        [])) + 1
        self.m = tf.to_int32(tf.reshape(tf.multinomial(tf.ones([1, m]), 1),
                                        [])) + 1
        self.network = network
        self.x_dim, self.v_dim = network.x_dim, network.v_dim

        self.z = tf.placeholder(tf.float32, [None, self.x_dim])
        self.x = tf.placeholder(tf.float32, [None, self.x_dim])
        self.xl = tf.placeholder(tf.float32, [None, self.x_dim])
        self.steps = tf.placeholder(tf.int32, [])
        bx, bz = tf.shape(self.x)[0], tf.shape(self.z)[0]

        # Obtain values from inference ops
        # `infer_op` contains Metropolis step
        v = tf.random_normal(tf.stack([bz, self.v_dim]))
        self.z_, self.v_ = self.infer_op((self.z, v), self.steps)

        # Reshape for pairwise discriminator
        x = tf.reshape(self.x, [-1, 2 * self.x_dim])
        xl = tf.reshape(self.xl, [-1, 2 * self.x_dim])

        # Obtain values from train ops
        v1 = tf.random_normal(tf.stack([bz, self.v_dim]))
        x1_, v1_ = self.train_op((self.z, v1), self.b)
        x1_ = x1_[-1]
        x1_sg = tf.stop_gradient(x1_)
        v2 = tf.random_normal(tf.stack([bx, self.v_dim]))
        x2_, v2_ = self.train_op((self.x, v2), self.m)
        x2_ = x2_[-1]
        v3 = tf.random_normal(tf.stack([bx, self.v_dim]))
        x3_, v3_ = self.train_op((x1_sg, v3), self.m)
        x3_ = x3_[-1]

        # The pairwise discriminator has two components:
        # (x, x2) from x -> x2
        # (x1, x3) from z -> x1 -> x3
        #
        # The optimal case is achieved when x1, x2, x3
        # are all from the data distribution
        x_ = tf.concat([tf.concat([x2_, self.x], 1),
                        tf.concat([x3_, x1_], 1)], 0)

        # Concat all v values for log-likelihood training
        v1_ = v1_[-1]
        v2_ = v2_[-1]
        v3_ = v3_[-1]
        v_ = tf.concat([v1_, v2_, v3_], 0)
        v_ = tf.reshape(v_, [-1, self.v_dim])

        d = discriminator(x, reuse=False)
        d_ = discriminator(x_)

        # generator loss

        # TODO: MMD loss (http://szhao.me/2017/06/10/a-tutorial-on-mmd-variational-autoencoders.html)
        # it is easy to implement, but maybe we should wait after this codebase is settled.
        self.v_loss = tf.reduce_mean(0.5 * tf.multiply(v_, v_))
        self.g_loss = tf.reduce_mean(d_) + self.v_loss * eta

        # discriminator loss
        self.d_loss = tf.reduce_mean(d) - tf.reduce_mean(d_)

        epsilon = tf.random_uniform([], 0.0, 1.0)
        x_hat = xl * epsilon + x_ * (1 - epsilon)
        d_hat = discriminator(x_hat)
        ddx = tf.gradients(d_hat, x_hat)[0]
        ddx = tf.norm(ddx, axis=1)
        ddx = tf.reduce_mean(tf.square(ddx - 1.0) * scale)
        self.d_loss = self.d_loss + ddx

        # I don't have a good solution to the tf variable scope mess.
        # So I basically force the NiceLayer to contain the 'generator' scope.
        # See `nice/__init__.py`.
        g_vars = [
            var for var in tf.global_variables() if 'generator' in var.name
        ]
        d_vars = [
            var for var in tf.global_variables()
            if discriminator.name in var.name
        ]

        self.d_train = tf.train.AdamOptimizer(learning_rate=5e-4, beta1=0.5, beta2=0.9)\
            .minimize(self.d_loss, var_list=d_vars)
        self.g_train = tf.train.AdamOptimizer(learning_rate=5e-4, beta1=0.5, beta2=0.9)\
            .minimize(self.g_loss, var_list=g_vars)

        self.init_op = tf.group(tf.global_variables_initializer(),
                                tf.local_variables_initializer())

        gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.Session(config=tf.ConfigProto(
            inter_op_parallelism_threads=1,
            intra_op_parallelism_threads=1,
            gpu_options=gpu_options,
        ))
        self.sess.run(self.init_op)
        self.ns = noise_sampler
        self.ds = None
        self.path = 'logs/' + energy_fn.name
        try:
            os.makedirs(self.path)
        except OSError:
            pass
Example #27
 def __init__(self):
     self.database = EsDatabase()
     self.tickers = config.TICKERS
     self.logger = create_logger('Evaluater')
Example #28
def train():
    set_device()
    output_dir = pathlib.Path(cfg.LOG_DIR)
    output_dir.mkdir(exist_ok=True, parents=True)
    logger = create_logger(name=__name__,
                           output_dir=output_dir,
                           filename='log.txt')
    # Dataset loading
    train_dataset = get_dataset(cfg.DATASET.TRAIN_DATA, cfg, is_training=True)
    val_dataset = get_dataset(cfg.DATASET.VAL_DATA, cfg)

    for batch, (images, labels) in enumerate(train_dataset):
        for i in range(cfg.TRAIN.BATCH_SIZE):
            img = np.array(images[i, :, :, :] * 255).astype(np.int64)
            label = np.array(labels[i, :, :, 0]).astype(np.int64)
            vis_segmentation(img, label, label_names=cfg.DATASET.LABELS)

    # Model construction and loss-function configuration
    model = create_model(cfg, name=cfg.MODEL_NAME, backbone=cfg.BACKBONE_NAME)
    model = add_regularization(model, tf.keras.regularizers.l2(cfg.LOSS.WEIGHT_DECAY))
    model.summary()

    loss = get_loss(cfg, cfg.LOSS.TYPE)

    # Optimizer and learning-rate configuration
    lr = tf.Variable(cfg.SCHEDULER.LR_INIT)
    learning_rate = learning_rate_config(cfg)

    # Warmup strategy
    def lr_with_warmup(global_steps):
        lr_ = tf.cond(tf.less(global_steps, cfg.SCHEDULER.WARMUP_STEPS),
                      lambda: cfg.SCHEDULER.LR_INIT * tf.cast((global_steps + 1) / cfg.SCHEDULER.WARMUP_STEPS, tf.float32),
                      lambda: tf.maximum(learning_rate(global_steps - cfg.SCHEDULER.WARMUP_STEPS), cfg.SCHEDULER.LR_LOWER_BOUND))
        return lr_

    optimizer = config_optimizer(cfg, learning_rate=lr)

    # Model saving and restoring
    manager, ckpt = ckpt_manager(cfg, model, logger, optimizer)

    # Static graphs for training and validation
    @tf.function
    def train_one_batch(x, y):
        with tf.GradientTape() as tape:
            # 1. Compute the model output and the loss
            pred_o = model(x, training=True)
            # pred_o, l2, l3, l4, l5 = model(x, training=True)
            regularization_loss_out = tf.reduce_sum(model.losses)
            # seg_loss_out = loss(y, pred_o) + 0.1 * (loss(y, l2) + loss(y, l3) + loss(y, l4) + loss(y, l5))
            seg_loss_out = loss(y, pred_o)
            total_loss_out = seg_loss_out + regularization_loss_out
        # Compute and apply gradients (standard pattern)
        grads = tape.gradient(total_loss_out, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        return total_loss_out, seg_loss_out, pred_o

    @tf.function
    def val_one_batch(x, y):
        # pred_o, _, _, _, _ = model(x, training=False)
        pred_o = model(x, training=False)
        return pred_o

    # region # Summary writer and evaluation metrics
    summary_writer = tf.summary.create_file_writer(cfg.LOG_DIR)
    # tf.summary.trace_on(profiler=True)  # enable tracing (optional)
    # Evaluation metrics
    val_metric = SegmentationMetric(cfg.DATASET.N_CLASSES)
    train_metric = SegmentationMetric(cfg.DATASET.N_CLASSES)
    val_metric.reset()
    train_metric.reset()
    # endregion

    # region # Iterative optimization
    for _ in range(int(ckpt.step), cfg.TRAIN.EPOCHS):
        # region # Training set
        ckpt.step.assign_add(1)
        lr.assign(lr_with_warmup(optimizer.iterations))  # must use assign to change the optimizer's lr value; otherwise it stays fixed
        for batch, (images_batch, labels_batch) in tqdm(enumerate(train_dataset)):
            total_loss, seg_loss, train_pred = train_one_batch(images_batch, labels_batch)
            # Compute training-set accuracy
            if int(ckpt.step) % cfg.TRAIN.SNAP_SHOT == 1:
                train_out = np.argmax(train_pred, axis=-1)
                for i in range(labels_batch.shape[0]):
                    train_label = np.array(labels_batch[i, :, :, 0]).astype(np.int64)
                    train_metric.addBatch(train_label, train_out[i, :, :])
            # if epoch > 200:
            with summary_writer.as_default():  # select the summary writer
                tf.summary.scalar("train/total_losses", total_loss, step=optimizer.iterations)  # write the current loss value to the writer
                tf.summary.scalar("train/segmentation_loss_loss", seg_loss, step=optimizer.iterations)
                tf.summary.scalar("train/learning_rate", lr, step=optimizer.iterations)
        # endregion

        # region # Validation set
        if int(ckpt.step) % cfg.TRAIN.SNAP_SHOT == 1:
            # ---------------------------------------------- validate on the validation set ----------------------------------------------
            for batch, (images_batch, labels_batch) in tqdm(enumerate(val_dataset)):
                out = val_one_batch(images_batch, labels_batch)
                out = np.squeeze(np.argmax(out, axis=-1))
                labels_batch = np.array(labels_batch[0, :, :, 0]).astype(np.int64)
                val_metric.addBatch(labels_batch, out)

            with summary_writer.as_default():
                tf.summary.scalar("val_metric/mPA", val_metric.meanPixelAccuracy(), step=int(ckpt.step))
                tf.summary.scalar("val_metric/dice", val_metric.dice(), step=int(ckpt.step))
                tf.summary.scalar("val_metric/IoU1", val_metric.IoU(1), step=int(ckpt.step))
                tf.summary.scalar("val_metric/mIoU", val_metric.mIoU(), step=int(ckpt.step))
                tf.summary.scalar("train_metric/mPA", train_metric.meanPixelAccuracy(), step=int(ckpt.step))
                tf.summary.scalar("train_metric/mIoU", train_metric.mIoU(), step=int(ckpt.step))
                tf.summary.scalar("train_metric/dice", train_metric.dice(), step=int(ckpt.step))
                tf.summary.scalar("train_metric/IoU1", train_metric.IoU(1), step=int(ckpt.step))
                # VAL_PA = val_metric.meanPixelAccuracy()
                logger.info('__EPOCH_{}__: TRAIN_mIoU: {:.5f}, TRAIN_mPA: {:.5f}, TRAIN_dice: {:.5f}; '
                            'VAL_mIoU: {:.5f}, VAL_mPA: {:.5f}, VAL_dice: {:.5f}'
                            .format(int(ckpt.step), train_metric.mIoU(), train_metric.meanPixelAccuracy(), train_metric.dice(),
                                    val_metric.mIoU(), val_metric.meanPixelAccuracy(), val_metric.dice()))
            train_metric.reset()
            val_metric.reset()
        # endregion

        # region # Model saving
        # Save model parameters to file with CheckpointManager, using a custom checkpoint number
        manager.save(checkpoint_number=int(ckpt.step))
Example #29
    parser.add_argument('--max_sen_num',
                        type=int,
                        default=7,
                        help='The maximum number of sentences in the finding')
    parser.add_argument(
        '--single_punc',
        type=bool,
        default=True,
        help=
        'Take punctuation as a single word: If true, generate sentences such as: Hello , world .'
    )

    args = parser.parse_args()
    print(args)
    # Record the training process and values
    logger = create_logger(args.log_path)
    logger.info('=' * 55)
    logger.info(args)
    logger.info('=' * 55)
    # The time of training the same model to get average results
    num_run = 3
    best_bleu_lst = []
    best_meteor_lst = []
    best_rouge_lst = []
    best_cider_lst = []
    for n_run in range(num_run):
        b_bleu, b_meteor, b_rouge, b_cider = train_net(n_run, logger, args)
        best_bleu_lst.append(b_bleu)
        best_meteor_lst.append(b_meteor)
        best_rouge_lst.append(b_rouge)
        best_cider_lst.append(b_cider)
Example #30
from violance_v2 import ViolenceDetection
from utils.config import Config
from utils.logger import create_logger
import sys

import logging

logger = create_logger("violence")
logger.setLevel(logging.DEBUG)

cfg = Config.fromfile(sys.argv[1])
cfg.mode = "test"
v = ViolenceDetection(cfg)
v.test()