Code example #1
def create_noise_tensor(model, seed=None):
    if seed is not None:  # seed=0 is a valid seed, so compare against None explicitly
        set_seed(seed)
    return [
        torch.normal(mean=0, std=1, size=p.data.size())
        for p in model.parameters()
    ]
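Every example in this collection calls a project-specific `set_seed` helper whose definition is not shown. For orientation, here is a minimal sketch of what such a helper typically does; the exact body varies between projects, and this version (seeding `random`, NumPy, and PyTorch) is an assumption rather than code from any of the listed repositories.

import random

import numpy as np
import torch


def set_seed(seed: int) -> None:
    """Hypothetical reference implementation: seed all common RNGs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op when CUDA is unavailable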
Code example #2
def train(config_file=""):

    opt = TrainOptions()
    args = opt.initialize(config_file=config_file)
    opt.print_options(args)

    # Set the seed system-wide for proper reproducibility
    set_seed(int(args['experiment'].seed))

    train_dataset = GTSRB(args, setname='train')
    val_dataset = GTSRB(args, setname='valid')
    test_dataset = GTSRB(args, setname='test')

    trainloader = get_loader(args, train_dataset)
    valloader = get_loader(args, val_dataset)
    testloader = get_loader(args, test_dataset)

    net, optimizer, scheduler = model.CreateModel(args=args)

    if args['experiment'].restore_from:
        device = torch.device(args['experiment'].device)
        PATH = args['experiment'].restore_from
        checkpoints = torch.load(PATH, map_location=device)

        net.load_state_dict(checkpoints['model_state_dict'])
        optimizer.load_state_dict(checkpoints['optimizer_state_dict'])

    if args['experiment'].wandb:
        init_wandb(net, args)

    train_engine(args=args,
                 trainloader=trainloader,
                 valloader=valloader,
                 model=net,
                 optimizer=optimizer,
                 scheduler=scheduler,
                 next_config=config_file)

    log_confusion = bool(args['experiment'].wandb)
    test_acc, test_loss, test_f1, cm, test_precision, test_recall = calc_acc_n_loss(
        args['experiment'], net, testloader, log_confusion)

    print(f'Test Accuracy = {test_acc}')
    print(f'Test Loss = {test_loss}')
    print(f'F1 Score = {test_f1}')
    print(f'Test Precision = {test_precision}')
    print(f'Test Recall = {test_recall}')

    with open(root_dir + config_file + "/" + config_file + "_train.txt", "w+") as f:
        f.write(
            str(round(test_acc, 3)) + " " + str(round(test_loss, 3)) + " " +
            str(round(test_f1, 3)) + " " + str(round(test_precision, 3)) + " " +
            str(round(test_recall, 3)))

    if args['experiment'].wandb:
        wandb_save_summary(test_acc=test_acc,
                           test_f1=test_f1,
                           test_precision=test_precision,
                           test_recall=test_recall)
Code example #3
File: oml_omniglot.py, Project: 0merjavaid/mrcl
def main():
    p = class_parser.Parser()
    parsed = p.parse_known_args()[0]
    total_seeds = len(parsed.seed)
    rank = parsed.rank
    all_args = vars(parsed)
    print("All args = ", all_args)

    args = utils.get_run(all_args, rank)

    utils.set_seed(args['seed'])

    my_experiment = experiment(args['name'], args, "../results/", commit_changes=False, rank=0, seed=1)
    writer = SummaryWriter(my_experiment.path + "tensorboard")

    logger = logging.getLogger('experiment')

    # Using first 963 classes of the omniglot as the meta-training set
    args['classes'] = list(range(963))

    args['traj_classes'] = list(range(963 // 2, 963))

    dataset = df.DatasetFactory.get_dataset(args['dataset'], background=True, train=True, path=args["path"], all=True)
    dataset_test = df.DatasetFactory.get_dataset(args['dataset'], background=True, train=False, path=args["path"], all=True)

    # Iterators used for evaluation
    iterator_test = torch.utils.data.DataLoader(dataset_test, batch_size=5,
                                                shuffle=True, num_workers=1)

    iterator_train = torch.utils.data.DataLoader(dataset, batch_size=5,
                                                 shuffle=True, num_workers=1)

    sampler = ts.SamplerFactory.get_sampler(args['dataset'], args['classes'], dataset, dataset_test)

    config = mf.ModelFactory.get_model("na", args['dataset'], output_dimension=1000)

    gpu_to_use = rank % args["gpus"]
    if torch.cuda.is_available():
        device = torch.device('cuda:' + str(gpu_to_use))
        logger.info("Using gpu : %s", 'cuda:' + str(gpu_to_use))
    else:
        device = torch.device('cpu')

    maml = MetaLearingClassification(args, config).to(device)


    for step in range(args['steps']):

        t1 = np.random.choice(args['traj_classes'], args['tasks'], replace=False)
        d_traj_iterators = []
        for t in t1:
            d_traj_iterators.append(sampler.sample_task([t]))
        d_rand_iterator = sampler.get_complete_iterator()
        x_spt, y_spt, x_qry, y_qry = maml.sample_training_data(d_traj_iterators, d_rand_iterator,
                                                               steps=args['update_step'], reset=not args['no_reset'])
        if torch.cuda.is_available():
            x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(device), x_qry.to(device), y_qry.to(device)

        accs, loss = maml(x_spt, y_spt, x_qry, y_qry)
Code example #4
def main():
    config = parse_args()
    # Ensure experiment is reproducible.
    # Results may vary across machines!
    utils.set_seed(config['seed'])
    # Set comment to name and then add hparams to tensorboard text
    curr_time = utils.get_time_str()
    logdir = f"./runs/{config['logdir']}/{curr_time} {config['comment']}"
    writer = SummaryWriter(log_dir=logdir)

    comment = config.pop('comment')
    writer.add_text('config', json.dumps(config, indent=4))

    print('*' * 30 + '\nRunning\n' + json.dumps(config, indent=4) + '\n' +
          '*' * 30)

    model, opt = train(config, writer)

    writer.flush()
    writer.close()

    utils.print_nonzeros(model)

    if config['save_model'] is not None:
        utils.save_run(model, opt, config)
Code example #5
 def __init__(self, options):
     """
     :param dict options: Configuration dictionary.
     """
     # Get config
     self.options = options
     # Define which device to use: GPU, or CPU
     self.device = options["device"]
     # Create empty dictionaries
     self.model_dict, self.summary = {}, {}
     # Set random seed
     set_seed(self.options)
     # ------Network---------
     # Instantiate networks
     print("Building models...")
     # Set Autoencoder i.e. setting loss, optimizer, and device assignment (GPU, or CPU)
     self.set_autoencoder()
     # Instantiate and set up the classifier if "supervised" (loss, optimizer, device)
     if self.options["supervised"]:
         self.set_classifier()
     # Set scheduler (its use is optional)
     self._set_scheduler()
     # Set paths for results and Initialize some arrays to collect data during training
     self._set_paths()
     # Print out model architecture
     self.get_model_summary()
Code example #6
def main(args):
    set_seed(args.seed)
    
    test_dataset = WaveTestDataset(args.wav_root, args.test_json_path)
    print("Test dataset includes {} samples.".format(len(test_dataset)))
    
    loader = TestDataLoader(test_dataset, batch_size=1, shuffle=False)
    
    model = ConvTasNet.build_model(args.model_path)
    print(model)
    print("# Parameters: {}".format(model.num_parameters))
    
    if args.use_cuda:
        if torch.cuda.is_available():
            model.cuda()
            model = nn.DataParallel(model)
            print("Use CUDA")
        else:
            raise ValueError("Cannot use CUDA.")
    else:
        print("Does NOT use CUDA")
    
    # Criterion
    if args.criterion == 'sisdr':
        criterion = NegSISDR()
    else:
        raise ValueError("Not support criterion {}".format(args.criterion))
    
    pit_criterion = PIT1d(criterion, n_sources=args.n_sources)
    
    tester = Tester(model, loader, pit_criterion, args)
    tester.run()
Code example #7
def test(config_file=""):

    opts = TrainOptions()
    args = opts.initialize(config_file)

    set_seed(int(args['experiment'].seed))

    model = MicroNet(args['experiment'])
    device = args['experiment'].device

    PATH = args['experiment'].restore_from
    checkpoint = torch.load(PATH, map_location=device)

    model.load_state_dict(checkpoint['model_state_dict'])

    if args['experiment'].wandb:
        init_wandb(model, args)

    test_dataset = GTSRB(args, setname='test')
    testloader = get_loader(args, test_dataset)

    log_confusion = bool(args['experiment'].wandb)
    out, histo = calc_acc_n_loss_2(args['experiment'],
                                   model,
                                   testloader,
                                   log_matrix=log_confusion)

    xai.rise(model, testloader, args["experiment"].num_classes, out,
             "data/traffic_sign_interiit/dataset/New_Test/")

    return out, histo
Code example #8
def run_from_config(config_fpath):
    # TODO: Set config file

    with open(config_fpath, 'r') as config_file:
        config = yaml.safe_load(config_file)

    # Set RNG seed
    if 'seed' in config:
        utils.set_seed(config['seed'])

    # Load our pre-trained model
    model = model_utils.load_trained_model(config.pop('model'),
                                           config['train_set'])
    # Get a fixed calibration / evaluation set
    calibration_dataset, eval_dataset = data_utils.get_cal_eval_split(
        config['test_set'], config['num_eval'])

    schedule_type = config.pop('schedule')
    if schedule_type == 'linear':
        schedule = linear_schedule(config['rate'], config['num_timesteps'])
    elif schedule_type == 'triangular':
        # The original called linear_schedule here as well, which looks like a
        # copy-paste slip; a triangular_schedule helper is assumed to exist.
        schedule = triangular_schedule(config['rate'], config['num_timesteps'])
    else:
        raise NotImplementedError

    return run_experiment_rotate(model, calibration_dataset, eval_dataset,
                                 schedule, **config)
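Neither `linear_schedule` nor the experiment runner is shown here. As a rough sketch of what a linear schedule could look like, assuming it returns one magnitude per timestep growing at a constant `rate` (both the signature and the return shape are assumptions):

def linear_schedule(rate, num_timesteps):
    """Hypothetical sketch: per-timestep magnitudes that grow linearly."""
    return [rate * t for t in range(num_timesteps)]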
Code example #9
def run(conf: DictConfig, current_dir) -> None:
    """
    Run pytorch-lightning model

    Args:
        new_dir:
        conf: hydra config

    """
    set_seed(conf.training.random_seed)
    hparams = OmegaConf.to_container(conf)

    trainer = pl.Trainer(**conf.trainer)

    dm = load_obj(conf.training.data_module_name)(hparams=hparams, conf=conf)
    dm.setup()

    model = load_obj(conf.training.lightning_module_name)(hparams=hparams, conf=conf, tag_to_idx=dm.tag_to_idx,
                                                          embedder=dm.embedder)

    # best_path = 'C:/Users/Ангелина/Python_codes/wsd_train_folder/outputs/2021-02-02_16-54-30/saved_models/epoch=22_valid_score_mean=0.9609.ckpt'
    best_path = 'C:/Users/Ангелина/Python_codes/wsd_train_folder/outputs/2021-02-09_19-27-50_elmo/saved_models/epoch=22_valid_score_mean=0.9617.ckpt'
    model = model.load_from_checkpoint(
        best_path, hparams=hparams, conf=conf, tag_to_idx=dm.tag_to_idx, embedder=dm.embedder, strict=False
    )
    save_name = best_path.split('/')[-1][:-5]  # file name without folder or the '.ckpt' extension
    model_name = f'C:/Users/Ангелина/Python_codes/wsd_train_folder/outputs/2021-02-09_19-27-50_elmo/saved_models/{save_name}.pth'
    print(model_name)
    torch.save(model.wsd_model.state_dict(), model_name)
Code example #10
def main(args):
    roc_aucs = []
    for i in range(args.n_runs):
        seed = i
        set_seed(seed)

        _, X, (train_idx, train_y), (val_idx, val_y), (test_idx, test_y), names = \
            tools.get_data(args.__dict__, seed=seed)

        if X is None or not X.shape[1]:
            raise ValueError('No features')

        train_x = X[train_idx].cuda()
        val_x = X[val_idx].cuda()
        test_x = X[test_idx].cuda()
        print('train_x', train_x.mean())
        print('test_x', test_x.mean())

        probs = mlp_fit_predict(train_x, train_y, test_x, val=(val_x, val_y))
        roc_auc = roc_auc_score(test_y, probs)
        roc_aucs.append(roc_auc)

        p = np.concatenate(
            [names[test_idx].reshape(-1, 1), probs.reshape(-1, 1)], axis=1)
        save_preds(p, args, seed)

    print('AUC (all runs):', roc_aucs)
    print('Mean AUC:', np.mean(roc_aucs))

    return np.mean(roc_aucs), np.std(roc_aucs)
Code example #11
def recreate_model(seeds, env):
    if not seeds:
        raise ValueError(f"Seeds list is empty: {seeds!r}")
    set_seed(seeds[0])
    model = create_model(env)
    for seed in seeds[1:]:
        model = mutate(model, seed=seed)
    return model
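`create_model` and `mutate` are project helpers that are not shown. The pattern itself is the compact "genome" encoding familiar from deterministic neuroevolution: a model is fully described by its seed list, because each seed deterministically regenerates the same mutation noise. A minimal sketch of what `mutate` might look like, assuming additive seeded Gaussian noise (the body and the `sigma` parameter are assumptions):

import torch


def mutate(model, seed, sigma=0.01):
    """Hypothetical sketch: perturb parameters with noise regenerated from `seed`."""
    gen = torch.Generator().manual_seed(seed)
    with torch.no_grad():
        for p in model.parameters():
            p.add_(sigma * torch.randn(p.shape, generator=gen))
    return model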
Code example #12
def run(conf: DictConfig) -> None:
    """
    Run pytorch-lightning model

    Args:
        new_dir:
        conf: hydra config

    """
    set_seed(conf.training.random_seed)

    hparams = OmegaConf.to_container(conf)

    # log_save_path = conf.general.all_logs_storage_path

    conf.callbacks.model_checkpoint.params.filepath = os.getcwd() + conf.callbacks.model_checkpoint.params.filepath

    checkpoint_callback: ModelCheckpoint = ModelCheckpoint(**conf.callbacks.model_checkpoint.params)
    early_stop_callback = EarlyStopping(**conf.callbacks.early_stopping.params)

    loggers = []
    if conf.logging.enable_logging:
        for logger in conf.logging.loggers:
            loggers.append(load_obj(logger.class_name)(**logger.params))

    trainer = pl.Trainer(logger=loggers, checkpoint_callback=checkpoint_callback, callbacks=[early_stop_callback],
                         **conf.trainer)

    dm = load_obj(conf.training.data_module_name)(hparams=hparams, conf=conf)
    dm.setup()
    num_steps_in_epoch = len(dm.train_dataloader())

    model = load_obj(conf.training.lightning_module_name)(hparams=hparams, conf=conf, tag_to_idx=dm.tag_to_idx,
                                                          embedder=dm.embedder, num_steps=num_steps_in_epoch)

    trainer.fit(model, dm)

    if conf.general.save_pytorch_model:
        if conf.general.save_best:
            best_path = trainer.checkpoint_callback.best_model_path  # type: ignore
            print('Best model score ', trainer.checkpoint_callback.best_model_score)
            # extract file name without folder and extension
            save_name = best_path.split('/')[-1][:-5]
            model = model.load_from_checkpoint(
                best_path, hparams=hparams, conf=conf, tag_to_idx=dm.tag_to_idx, embedder=dm.embedder, strict=False
            )
            model_name = f'saved_models/{save_name}.pth'
            print(model_name)
            torch.save(model.model.state_dict(), model_name)
        else:
            os.makedirs('saved_models', exist_ok=True)
            model_name = 'saved_models/last.pth'
            print(model_name)
            torch.save(model.model.state_dict(), model_name)

    trainer.test(model=model, datamodule=dm)
Code example #13
def main(args):
    set_seed(args.seed)
    
    speakers_path = os.path.join(args.librispeech_root, "SPEAKERS.TXT")
    samples = int(args.sr * args.duration)
    
    json_data = make_json_data(args.wav_root, args.json_path, speakers_path=speakers_path, samples=samples, n_sources=args.n_sources)
        
    with open(args.json_path, 'w') as f:
        json.dump(json_data, f, indent=4)
Code example #14
def main(args):
    set_seed(args.seed)
    
    samples = int(args.sr * args.duration)
    overlap = 0
    max_samples = int(args.sr * args.valid_duration)

    train_dataset = IdealMaskSpectrogramTrainDataset(
        args.train_wav_root, args.train_list_path,
        fft_size=args.fft_size, hop_size=args.hop_size, window_fn=args.window_fn,
        mask_type=args.ideal_mask, threshold=args.threshold,
        samples=samples, overlap=overlap, n_sources=args.n_sources)
    valid_dataset = IdealMaskSpectrogramEvalDataset(
        args.valid_wav_root, args.valid_list_path,
        fft_size=args.fft_size, hop_size=args.hop_size, window_fn=args.window_fn,
        mask_type=args.ideal_mask, threshold=args.threshold,
        max_samples=max_samples, n_sources=args.n_sources)
    print("Training dataset includes {} samples.".format(len(train_dataset)))
    print("Valid dataset includes {} samples.".format(len(valid_dataset)))
    
    loader = {}
    loader['train'] = TrainDataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    loader['valid'] = EvalDataLoader(valid_dataset, batch_size=1, shuffle=False)
    
    if args.max_norm == 0:
        args.max_norm = None  # treat max_norm=0 as "no gradient clipping"
    
    args.n_bins = args.fft_size//2 + 1
    model = DANet(
        args.n_bins, embed_dim=args.embed_dim, hidden_channels=args.hidden_channels,
        num_blocks=args.num_blocks, causal=args.causal,
        mask_nonlinear=args.mask_nonlinear, iter_clustering=args.iter_clustering)
    print(model)
    print("# Parameters: {}".format(model.num_parameters))
    
    if args.use_cuda:
        if torch.cuda.is_available():
            model.cuda()
            model = nn.DataParallel(model)
            print("Use CUDA")
        else:
            raise ValueError("Cannot use CUDA.")
    else:
        print("Does NOT use CUDA")
        
    # Optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise ValueError("Not support optimizer {}".format(args.optimizer))
    
    # Criterion
    if args.criterion == 'l1loss':
        criterion = L1Loss(dim=(2,3), reduction='mean') # (batch_size, n_sources, n_bins, n_frames)
    elif args.criterion == 'l2loss':
        criterion = L2Loss(dim=(2,3), reduction='mean') # (batch_size, n_sources, n_bins, n_frames)
    else:
        raise ValueError("Not support criterion {}".format(args.criterion))
    
    trainer = AdhocTrainer(model, loader, criterion, optimizer, args)
    trainer.run()
Code example #15
def forward_model(best_model, method):
    args = best_model['args']

    torch.cuda.set_device(args.gpu)
    set_seed(args)

    # load and preprocess dataset
    all_data = load_dataset(args)
    training = all_data[:int(len(all_data) * 0.7)]
    validation = all_data[int(len(all_data) * 0.7):int(len(all_data) * 0.8)]
    testing = all_data[int(len(all_data) * 0.8):]

    train_loader = DataLoader(training,
                              batch_size=1000,
                              shuffle=True,
                              collate_fn=collate)
    val_loader = DataLoader(validation,
                            batch_size=1000,
                            shuffle=True,
                            collate_fn=collate)
    test_loader = DataLoader(testing,
                             batch_size=4000,
                             shuffle=False,
                             collate_fn=collate)

    dataset = (None, np.zeros((15, 15)), np.zeros((1, args.num_factors)),
               None, None, None, None)
    # create model
    model = get_model(dataset, args, mode='multilabel').cuda()

    for step, (g, labels, gt_adjs) in enumerate(test_loader):
        model.load_state_dict(best_model['model_state_dict'])
        model.eval()

        # update the new graph
        model.g = g

        features = g.ndata['feat'].float().cuda()
        labels = labels.cuda()
        logits = model(features)  #.view(-1, n_class, n_latent)

        hidden = model.get_hidden_feature()
        matrix = hidden[0]  # shape: (num_samples, hidden_dim)
        # Pairwise Pearson correlation between every pair of hidden dimensions
        correlation = np.zeros((matrix.shape[1], matrix.shape[1]))
        for i in range(matrix.shape[1]):
            for j in range(matrix.shape[1]):
                cof = scipy.stats.pearsonr(matrix[:, i], matrix[:, j])[0]
                correlation[i][j] = cof

        plot_corr(np.abs(correlation), save=f'{method}.png')
Code example #16
def main(args):
    set_seed(args.seed)
    
    loader = {}
    
    train_dataset = WaveTrainDataset(args.wav_root, args.train_json_path)
    valid_dataset = WaveTrainDataset(args.wav_root, args.valid_json_path)
    print("Training dataset includes {} samples.".format(len(train_dataset)))
    print("Valid dataset includes {} samples.".format(len(valid_dataset)))
    
    loader['train'] = TrainDataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    loader['valid'] = TrainDataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False)
    
    model = ConvTasNet(
        args.n_basis, args.kernel_size, stride=args.stride,
        enc_basis=args.enc_basis, dec_basis=args.dec_basis,
        enc_nonlinear=args.enc_nonlinear, window_fn=args.window_fn,
        sep_hidden_channels=args.sep_hidden_channels,
        sep_bottleneck_channels=args.sep_bottleneck_channels,
        sep_skip_channels=args.sep_skip_channels,
        sep_kernel_size=args.sep_kernel_size,
        sep_num_blocks=args.sep_num_blocks, sep_num_layers=args.sep_num_layers,
        dilated=args.dilated, separable=args.separable, causal=args.causal,
        sep_nonlinear=args.sep_nonlinear, sep_norm=args.sep_norm,
        mask_nonlinear=args.mask_nonlinear, n_sources=args.n_sources)
    print(model)
    print("# Parameters: {}".format(model.num_parameters))
    
    if args.use_cuda:
        if torch.cuda.is_available():
            model.cuda()
            model = nn.DataParallel(model)
            print("Use CUDA")
        else:
            raise ValueError("Cannot use CUDA.")
    else:
        print("Does NOT use CUDA")
        
    # Optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise ValueError("Not support optimizer {}".format(args.optimizer))
    
    # Criterion
    if args.criterion == 'sisdr':
        criterion = NegSISDR()
    else:
        raise ValueError("Not support criterion {}".format(args.criterion))
    
    pit_criterion = PIT1d(criterion, n_sources=args.n_sources)
    
    trainer = Trainer(model, loader, pit_criterion, optimizer, args)
    trainer.run()
Code example #17
def main(config):
    state_dict = utils.load_state_dict(fpath=config['saved_model_path'])
    model_state = state_dict['model_state']
    mask = state_dict['mask']
    training_cfg = state_dict['config']

    utils.set_seed(training_cfg['seed'])
    # Instantiate model & attributes, load state dict 
    model = getters.get_quant_model(config)

    # The model apparently needs to be wrapped in DataParallel for loading to work

    init_attrs(model, training_cfg)

    # Load model weights and mask
    model = load_weights_and_mask(config, model, model_state, mask)
    print_size_of_model(model)
    # Switch to eval mode, move to cpu and prepare for quantization
    # do module fusion
    # fuse_model(model)
    quant_model = prepare_model_for_quantization(model, config)

    # Grab all necessary objects
    loaders, sizes = getters.get_dataloaders(training_cfg)
    train_loader, _, test_loader = loaders
    train_size, _, test_size = sizes
    batches_per_train_epoch = math.ceil(train_size / training_cfg['batch_size'])
    batches_per_test_epoch = math.ceil(test_size / training_cfg['test_batch_size'])

    # Calibration (could possibly use more epochs)
    calib_acc, calib_loss = evaluate(quant_model, train_loader, batches_per_train_epoch)

    torch.quantization.convert(quant_model, inplace=True)
    logger.info('Successfully quantized model!')

    print_size_of_model(quant_model)
    logger.info('Evaluating...')
    train_acc, train_loss = evaluate(quant_model, train_loader, batches_per_train_epoch)
    test_acc, test_loss = evaluate(quant_model, test_loader, batches_per_test_epoch)
    
    logger.info('train acc: {} train loss: {}'.format(train_acc, train_loss))
    logger.info('test acc: {} test loss: {}'.format(test_acc, test_loss))
    
    # Save model in same folder
    save_path = config['saved_model_path'].replace('model.pt', '')
    utils.save_run_quant(quant_model, save_path, config, train_acc, test_acc)
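The `prepare_model_for_quantization` helper is not shown in this snippet. For orientation, here is a minimal sketch of such a helper using PyTorch's eager-mode static quantization API; that it matches the project's actual helper is an assumption, and the `config` argument is ignored here.

import torch


def prepare_model_for_quantization(model, config):
    """Hypothetical sketch: eager-mode post-training static quantization prep."""
    model.eval()
    model.cpu()  # eager-mode quantized kernels run on the CPU backend
    model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
    torch.quantization.prepare(model, inplace=True)  # insert calibration observers
    return model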
Code example #18
File: main.py, Project: ignavier/golem
def main():
    # Get arguments parsed
    args = get_args()

    # Setup for logging
    output_dir = 'output/{}'.format(get_datetime_str())
    create_dir(output_dir)  # Create directory to save log files and outputs
    LogHelper.setup(log_path='{}/training.log'.format(output_dir),
                    level='INFO')
    _logger = logging.getLogger(__name__)
    _logger.info("Finished setting up the logger.")

    # Save configs
    save_yaml_config(vars(args), path='{}/config.yaml'.format(output_dir))

    # Reproducibility
    set_seed(args.seed)

    # Load dataset
    dataset = SyntheticDataset(args.n, args.d, args.graph_type, args.degree,
                               args.noise_type, args.B_scale, args.seed)
    _logger.info("Finished loading the dataset.")

    # Load B_init for initialization
    if args.init:
        if args.init_path is None:
            args.init_path = get_init_path('output/')

        B_init = np.load(args.init_path)
        _logger.info("Finished loading B_init from {}.".format(args.init_path))
    else:
        B_init = None

    # GOLEM
    B_est = golem(dataset.X, args.lambda_1, args.lambda_2,
                  args.equal_variances, args.num_iter, args.learning_rate,
                  args.seed, args.checkpoint_iter, output_dir, B_init)
    _logger.info("Finished training the model.")

    # Post-process estimated solution
    B_processed = postprocess(B_est, args.graph_thres)
    _logger.info("Finished post-processing the estimated graph.")

    # Checkpoint
    checkpoint_after_training(output_dir, dataset.X, dataset.B, B_init, B_est,
                              B_processed, _logger.info)
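The `postprocess` step thresholds the estimated weighted adjacency matrix. A minimal sketch of the idea (the real helper may additionally enforce acyclicity; this body is an assumption):

import numpy as np


def postprocess(B_est, graph_thres):
    """Hypothetical sketch: zero out edges with weights below the threshold."""
    B = np.array(B_est)
    B[np.abs(B) < graph_thres] = 0.0
    return B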
Code example #19
def main(args):
    set_seed(111)

    test_dataset = EvalDataset(args.test_image_root, args.test_path, H=args.H, W=args.W, R=args.R, G=args.G)
    print("Test dataset includes {} images.".format(len(test_dataset)))
    
    C = 3
    channels = args.channels.replace('[', '').replace(']', '').split(',')
    channels_backbone = [int(channel) for channel in channels]
    logR = int(math.log2(args.R))

    channels_down = [C]

    for r in range(logR//2):
        channel = channels_backbone[0]//(logR//2 - r)
        channels_down.append(channel)

    downsample_net = DownsampleNetBase(channels_down, kernel_size=args.K_down, stride=args.S_down, pool=args.pool_down)

    backbone = UNet2d(channels_backbone, kernel_size=args.K_backbone, stride=args.S_backbone, dilated=args.dilated, separable=args.separable, nonlinear_enc=args.nonlinear_backbone, nonlinear_dec=args.nonlinear_backbone)

    head_list = ['heatmap', 'local_offset', 'size']
    head_modules = {
        'heatmap': HeatmapNetBase(channels_backbone[0]),
        'local_offset': LocalOffsetNetBase(channels_backbone[0]),
        'size': SizeNetBase(channels_backbone[0])
    }

    head_net = ObjectDetectionNetBase(head_modules=head_modules)

    model = CenterNet(downsample_net, backbone, head_net)
    print(model, flush=True)
    print("# Parameters:", model.num_parameters)

    if torch.cuda.is_available():
        model.cuda()
        model = nn.DataParallel(model)
        print("Use CUDA")
    else:
        print("Does NOT use CUDA")
        
    evaluater = Evaluater(model, test_dataset, args)
    evaluater.run()
Code example #20
    def preprocess_args(config):
        config['device'] = get_device()
        config['n_classes'] = 2 if config['loss_func'] == 'ce' else 1

        # Check all provided paths:
        if not os.path.exists(config['data_path']):
            raise ValueError("[!] ERROR: Dataset path does not exist")
        else:
            LOGGER.info("Data path checked..")
        if not os.path.exists(config['model_path']):
            LOGGER.warning(
                "Creating checkpoint path for saved models at:  {}\n".format(
                    config['model_path']))
            os.makedirs(config['model_path'])
        else:
            LOGGER.info("Model save path checked..")
        if 'config' in config:
            if not os.path.isfile(config['config']):
                raise ValueError("[!] ERROR: config JSON path does not exist")
            else:
                LOGGER.info("config JSON path checked..")
        if not os.path.exists(config['vis_path']):
            LOGGER.warning(
                "Creating checkpoint path for Tensorboard visualizations at:  {}\n"
                .format(config['vis_path']))
            os.makedirs(config['vis_path'])
        else:
            LOGGER.info("Tensorboard Visualization path checked..")
            LOGGER.info(
                "Cleaning Visualization path of older tensorboard files...\n")
            # shutil.rmtree(config['vis_path'])

        # Print args
        print("\n" + "x" * 50 +
              "\n\nRunning training with the following parameters: \n")
        for key, value in config.items():
            if not key.endswith('transf'):
                print(key + ' : ' + str(value))
        print("\n" + "x" * 50)

        # config['vis_path'] = os.path.join(config['vis_path'], '{}_conf{}'.format(config['pretrained_model_file'], config['confounder_repeat']))
        config['writer'] = SummaryWriter(config['vis_path'])

        set_seed(config['seed'])
        return config
Code example #21
def run_from_config(config_fpath):
    # TODO: Set config file

    with open(config_fpath, 'r') as config_file:
        config = yaml.safe_load(config_file)

    # Set RNG seed
    if 'seed' in config:
        utils.set_seed(config['seed'])

    # Load our pre-trained model
    model = model_utils.load_trained_model(config.pop('model'),
                                           config['train_set'])
    # Get a fixed calibration / evaluation set
    calibration_dataset, eval_dataset = data_utils.get_cal_eval_split(
        config['test_set'], config['num_eval'])

    return run_experiment(model, calibration_dataset, eval_dataset, **config)
Code example #22
def run_from_config(config_fpath):
    with open(config_fpath, 'r') as config_file:
        config = yaml.safe_load(config_file)

    # Set RNG seed
    if 'seed' in config:
        utils.set_seed(config['seed'])

    model_dict = {'RandomForest': RandomForestClassifier(),
                  'AdaBoost': AdaBoostClassifier(),
                  'GaussianNB': GaussianNB(),
                  'LogisticRegression': LogisticRegression()}
    model = model_dict[config.pop('model')]
    dataset = config.pop('dataset')
    out_path = config.pop('out_path')

    # pop('seed', None) avoids a KeyError when no seed was given (see the check above)
    return run_experiment(model, dataset, config.pop('num_eval'), config.pop('num_cal'),
                          config.pop('seed', None), out_path=out_path, **config)
Code example #23
    def __init__(self, model: Union[str, Path, GPT2PreTrainedModel] = 'gpt2', tokenizer: str = 'gpt2', seed: int = 42):
        # Set up device
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        n_gpu = torch.cuda.device_count()
        utils.set_seed(seed, n_gpu)

        # Set up model
        if isinstance(model, Path) or isinstance(model, str):
            model = GPT2LMHeadModel.from_pretrained(str(model))
        self.model = model.to(self.device)

        # Set up tokenizer
        # IMPORTANT: Passing the pad token to the constructor like this gives it
        # pad_token_id = 50256, which normally belongs to the <EOS> token in GPT2.
        # It is an ugly but currently working way to point pad_token_id at the
        # <EOS> token that is already included in the vocabulary.
        self.tokenizer = GPT2Tokenizer.from_pretrained(tokenizer, pad_token=self.STOP_TOKEN)
        assert self.tokenizer.eos_token_id == self.tokenizer.pad_token_id
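A more common idiom for the same effect is to reuse the existing EOS token as the pad token after construction; a sketch of that alternative (not taken from the original class):

from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token  # pad_token_id becomes 50256, the eos_token_id
assert tokenizer.pad_token_id == tokenizer.eos_token_id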
Code example #24
File: run_dcgan_mnist.py, Project: Jarvis73/GAN.TFv2
def train(_run, _config):
    cfg = utils.Map(_config)
    utils.set_seed(cfg.seed)
    logger = loggers.get_global_logger(name=NAME)

    # Build Model and Trainer
    logger.info(f"Initialize ==> Model {cfg.m.model}")
    model = dcgan.DCGAN()
    logger.info(f"           ==> Trainer")
    train_obj = Trainer(cfg, model)

    # Build data loader
    logger.info(f"           ==> Data loader for {ModeKeys.TRAIN}")
    train_dataset = mnist.loader(ModeKeys.TRAIN)[0]

    # Start training
    try:
        train_obj.start_training_loop(train_dataset)
    except KeyboardInterrupt:
        logger.info("Main process is terminated by user.")
    finally:
        logger.info(f"Ended running with id {_run._id}.")
Code example #25
def main(args):
    set_seed(args.seed)

    test_dataset = IdealMaskSpectrogramTestDataset(args.wav_root,
                                                   args.test_json_path,
                                                   fft_size=args.fft_size,
                                                   hop_size=args.hop_size,
                                                   window_fn=args.window_fn,
                                                   mask_type=args.ideal_mask,
                                                   threshold=args.threshold)
    print("Test dataset includes {} samples.".format(len(test_dataset)))

    args.F_bin = args.fft_size // 2 + 1
    loader = AttractorTestDataLoader(test_dataset, batch_size=1, shuffle=False)

    model = DANet.build_model(args.model_path)
    print(model)
    print("# Parameters: {}".format(model.num_parameters))

    if args.use_cuda:
        if torch.cuda.is_available():
            model.cuda()
            model = nn.DataParallel(model)
            print("Use CUDA")
        else:
            raise ValueError("Cannot use CUDA.")
    else:
        print("Does NOT use CUDA")

    # Criterion
    if args.criterion == 'l2loss':
        criterion = L2Loss()
    else:
        raise ValueError("Not support criterion {}".format(args.criterion))

    pit_criterion = PIT2d(criterion, n_sources=args.n_sources)

    tester = AttractorTester(model, loader, pit_criterion, args)
    tester.run()
Code example #26
 def __init__(self, options):
     """
     :param dict options: Configuration dictionary.
     """
     # Load pre-trained image autoencoder model
     model_img = AEModel(options)
     # Load weights
     model_img.load_models()
     # Extract autoencoder from the model
     self.autoencoder = model_img.autoencoder
     # Get config
     self.options = options
     # Define which device to use: GPU, or CPU
     self.device = options["device"]
     # Create empty dictionaries
     self.model_dict, self.summary = {}, {}
     # Set random seed
     set_seed(self.options)
     # Set paths for results and Initialize some arrays to collect data during training
     self._set_paths()
     # Set directories i.e. create ones that are missing.
     set_dirs(self.options)
     # ------Network---------
     # Instantiate networks
     print("Building RNA models for Data Translation...")
     # Turn off convolution to get fully-connected AE model
     self.options["convolution"] = False
     # Set RNA Autoencoder i.e. setting loss, optimizer, and device assignment (GPU, or CPU)
     self.set_autoencoder_rna()
     # Set AEE i.e. setting loss, optimizer, and device assignment (GPU, or CPU)
     if self.options["joint_training_mode"] == "aae":
         self.set_aae()
         self.options["supervised"] = False
     # Instantiate and set up the classifier if "supervised" (loss, optimizer, device)
     if self.options["supervised"]:
         self.set_classifier_dt()
     # Set scheduler (its use is optional)
     self._set_scheduler()
     # Print out model architecture
     self.print_model_summary()
Code example #27
def run_once(args):

    cfg, run_id, path = args

    sim_path = path + "/" + cfg.simulator.save_folder
    if not os.path.exists(sim_path):
        os.makedirs(sim_path)

    simulator = Simulator(cfg, sim_path, log)
    simulator.start()

    # -- Set seed
    cfg.general.seed = utils.set_seed(cfg.general.seed)

    # -- Load simulator
    # TODO 2 start server with config
    # TODO 2 Save simulator config in path ( see line 41 with save_config(

    # -- Resume agent and metrics if checkpoints are available
    if cfg.checkpoint:  # the concatenated path is always truthy, so test the checkpoint name
        resume_path = path + "/" + cfg.checkpoint
        log.info("Resuming training ...")
        cfg.agent.resume = resume_path
    logging.info('listening to server %s:%s', cfg.simulator.host,
                 cfg.simulator.port)

    # -- Get agent
    agent = get_agent(cfg.agent)
    agent.set_simulator(cfg)

    os.chdir(sim_path)

    benchmark_agent = DemoBenchmark(cfg.simulator.town)

    # -- Init finished
    #save_config(os.path.join(cfg.general.common.save_path, "ran_cfg"), cfg)

    # Now actually run the driving_benchmark
    run_driving_benchmark(agent, benchmark_agent, cfg.simulator.town,
                          cfg.simulator.carla_log_name,
                          cfg.simulator.continue_experiment,
                          cfg.simulator.host, cfg.simulator.port)

    simulator.kill_process()
Code example #28
File: train.py, Project: nemodrive/SteeringNetwork
def run_once(args):
    cfg, run_id, path = args

    # -- Set seed
    cfg.general.seed = utils.set_seed(cfg.general.seed)

    # -- Get data loaders
    data_loader = get_data_loader(cfg.data_loader)

    train_data = data_loader.get_train_loader()
    test_data = data_loader.get_test_loader()

    # -- Resume agent and metrics if checkpoints are available
    # TODO Resume
    if cfg.checkpoint != "":
        resume_path = path + "/" + cfg.checkpoint
        log.info("Resuming training ...")
        cfg.agent.resume = resume_path

    # -- Get agent
    agent = get_agent(cfg.agent)

    # -- Should have some kind of reporting agent
    # TODO Implement reporting agent

    # -- Init finished
    save_config(os.path.join(cfg.general.common.save_path, "ran_cfg"), cfg)

    eval_freq = cfg.train.eval_freq
    no_epochs = cfg.train.no_epochs - agent.get_train_epoch()

    for epoch in range(no_epochs):
        log.info("Train epoch: {}".format(epoch))
        agent.train(train_data)
        if epoch % eval_freq == 0:
            agent.test(test_data)
        print("Finished an epoch :D")

    with open(path + "/loss_values_train", "wb") as f:
        pickle.dump(agent.loss_values_train, f)

    with open(path + "/loss_values_test", "wb") as f:
        pickle.dump(agent.loss_values_test, f)

    agent.eval_agent()
Code example #29
def run_once(args):
    cfg, run_id, path = args

    # -- Set seed
    cfg.general.seed = utils.set_seed(cfg.general.seed)

    # -- Resume agent and metrics if checkpoints are available
    # TODO Resume
    if cfg.checkpoint:  # the concatenated path is always truthy, so test the checkpoint name
        resume_path = path + "/" + cfg.checkpoint
        log.info("Resuming training ...")
        cfg.agent.resume = resume_path

    # -- Get agent
    agent = get_agent(cfg.agent)

    # -- Should have some kind of reporting agent
    # TODO Implement reporting agent

    # -- Init finished
    save_config(os.path.join(cfg.general.common.save_path, "ran_cfg"), cfg)

    agent.eval_agent()
Code example #30
def run_once(args):
    cfg, run_id, path = args

    # -- Set seed
    cfg.general.seed = utils.set_seed(cfg.general.seed)

    # -- Resume agent and metrics if checkpoints are available
    # TODO Resume
    if cfg.checkpoint:  # the concatenated path is always truthy, so test the checkpoint name
        resume_path = path + "/" + cfg.checkpoint
        log.info("Network_activation ...")
        cfg.agent.resume = resume_path

    # -- Get agent
    agent = get_agent(cfg.agent)

    if cfg.eval_model is False:
        log.info("Not in eval mode")
        return

    if cfg.image_number != -1:
        eval_network(agent, cfg)