Example #1
def train_gmn():

    # ==> initial check
    assert args.dataset == 'imagenet'

    # ==> gpu configuration
    ut.initialize_GPU(args)

    # ==> set up model path and log path.
    model_path, log_path = ut.set_path(args)

    # ==> import library
    import keras
    import data_loader
    import model_factory
    import data_generator

    # ==> get dataset information
    trn_config = data_loader.get_config(args)
    print('trn_config:', trn_config)
    params = {
        'cg': trn_config,
        'processes': 12,
        'batch_size': args.batch_size,
    }
    trn_gen, val_gen = data_generator.setup_generator(**params)

    # ==> load model
    gmn = model_factory.two_stream_matching_networks(trn_config)
    gmn.summary()

    # ==> attempt to load pre-trained model
    if args.resume:
        if os.path.isfile(args.resume):
            gmn.load_weights(os.path.join(args.resume), by_name=True)
            print('==> successfully loading the model: {}'.format(args.resume))
        else:
            print("==> no checkpoint found at '{}'".format(args.resume))

    # ==> set up callbacks, e.g. lr schedule, tensorboard, save checkpoint.
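    # Assumption: ut.step_decay(args) returns a callable mapping epoch -> learning rate,
    # which is what keras.callbacks.LearningRateScheduler expects.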
    normal_lr = keras.callbacks.LearningRateScheduler(ut.step_decay(args))
    tbcallbacks = keras.callbacks.TensorBoard(log_dir=log_path,
                                              histogram_freq=0,
                                              write_graph=False,
                                              write_images=False)
    callbacks = [
        keras.callbacks.ModelCheckpoint(os.path.join(model_path, 'model.h5'),
                                        monitor='val_loss',
                                        save_best_only=True,
                                        mode='min'), normal_lr, tbcallbacks
    ]

    gmn.fit_generator(trn_gen,
                      steps_per_epoch=600,
                      epochs=args.epochs,
                      validation_data=val_gen,
                      validation_steps=100,
                      callbacks=callbacks,
                      verbose=1)
Example #2
def exec_struc2vec(args):
    """
    Pipeline for representational learning for all nodes in a graph.
    """
    if args.weighted and not args.directed:
        raise NotImplementedError(
            'edge weights are only implemented for directed graphs')
    item_paralel = None
    if args.item:
        item_paralel = args.item
    pathhhh = set_path(item_paralel)
    path_pickle(item_paralel)
    path_random_walk(pathhhh)
    global random_walk_path
    random_walk_path = pathhhh
    ss = item_paralel
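    # OPT3 (struc2vec approximation): cap the structural-similarity hierarchy at
    # `until_layer` layers instead of going up to the full graph diameter.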
    if args.OPT3:
        until_layer = args.until_layer
    else:
        until_layer = None

    if args.embed_subset:
        embedding_vertices = read_embedding_set(args)
    else:
        embedding_vertices = None

    graph_dict, in_degrees, out_degrees, val_density = read_graph(
        args)  # in_degrees = out_degrees = {} if not args.directed
    G = graph.Graph(graph_dict,
                    args.directed,
                    args.workers,
                    bfs_workers=args.bfs_workers,
                    until_layer=until_layer,
                    in_degrees=in_degrees,
                    out_degrees=out_degrees,
                    embedding_vertices=embedding_vertices,
                    density=val_density)

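    # OPT1 and OPT2 are the other struc2vec approximations: OPT1 compares compact
    # degree representations when computing structural distances, and OPT2 computes
    # distances only for a reduced set of vertex pairs instead of all pairs.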
    if args.OPT1:
        G.preprocess_neighbors_with_bfs_compact()
    else:
        G.preprocess_neighbors_with_bfs()

    if args.OPT2:
        G.create_vectors()
        G.calc_distances(compact_degree=args.OPT1)
    else:
        G.calc_distances_all_vertices(compact_degree=args.OPT1)

    G.create_distances_network()
    G.preprocess_parameters_random_walk()

    G.simulate_walks(args.num_walks, args.walk_length)

    return G
Example #3
    txt_write(path_out, sim_id, gal, abund)


if __name__ == '__main__':

    path_flexce = join(os.path.abspath(os.path.dirname(__file__)), '')
    path_flexce_root = os.path.abspath(join(path_flexce, '..'))
    path_data = join(path_flexce, 'data')

    argv = None
    if argv is None:
        argv = sys.argv

    try:
        default_config_path = join(path_flexce_root, 'config')
        fname, path_config = utils.set_path(argv[1], default_config_path)
    except IndexError:
        path_config = join(os.getenv('HOME'), 'flexCE', 'examples')
        fname = 'sim0.cfg'
        print('\nUsing default parameters in \n{}'.format(join(path_config, fname)))

    file_in = join(path_config, fname)

    # TODO Add try...except to handle user-defined output path
    path_out = utils.substitute_dir_in_path(path_config, 'config', 'output')

    (simulation_id, yld_args, initialize_args, mass_bins_args, snia_dtd_args,
     inflows_args, outflows_args, warmgasres_args, sf_args) = \
        read_sim_cfg(file_in)
    mass_bins = utils.define_mass_bins(**mass_bins_args)
    ylds = utils.load_yields(path_data, yld_args, mass_bins)
Example #4
"""Run example scripts with the various PEERs running on the Dicom Test
Server VM."""

import test_config
import os
from utils import set_path

set_path()

script_dir = '../examples'

# storescu
cmd_template = '%s/storescu.py -aet %s -aec %s -implicit %s %d %s'
for pp in test_config.peers:
    files = '/data/PatientsTests/0008-Prostate_1/*'
    cmd = cmd_template % (script_dir, test_config.AET, pp['aet'], pp['host'],
                          pp['port'], files)
    print(cmd)
    os.system(cmd)
Example #5
import utils

utils.set_path()
from RelationalGraph import *
from Potential import GaussianPotential
from EPBPLogVersion import EPBP
from GaBP import GaBP
import numpy as np
import time
from OneShot import OneShot, LiftedOneShot
from CompressedGraphSorted import CompressedGraphSorted
from copy import copy

seed = 0
utils.set_seed(seed=seed)

instance_category = []
instance_bank = []
for i in range(100):
    # for i in range(50):
    # for i in range(10):
    instance_category.append(f'c{i}')
for i in range(5):
    instance_bank.append(f'b{i}')

d = Domain((-50, 50), continuous=True, integral_points=linspace(-50, 50, 30))

diag = 10.
off_diag_coef = 1.
p1 = GaussianPotential(
    [0., 0.], [[diag, off_diag_coef * -7.], [off_diag_coef * -7., diag]])
Example #6
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # Loss
    criterion = CrossEntropyLoss(cfg.MODEL.NUM_CLASSES).cuda()

    # model and optimizer
    print(f"Definining network with {cfg.MODEL.LAYERS} layers...")
    model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES, cfg.MODEL.LAYERS, criterion, primitives_2,
                    drop_path_prob=cfg.TRAIN.DROPPATH_PROB)
    model = model.cuda()

    # weight params
    arch_params = list(map(id, model.arch_parameters()))
    weight_params = filter(lambda p: id(p) not in arch_params,
                           model.parameters())
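    # Architecture parameters are excluded (matched by object id) so this optimizer
    # only updates the network weights; the Architect created below handles the
    # architecture updates.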

    # Optimizer
    optimizer = optim.Adam(
        weight_params,
        lr=cfg.TRAIN.LR
    )

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint_best.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_acc1 = checkpoint['best_acc1']
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs_search', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_acc1 = 0.0
        last_epoch = -1

    logger.info(args)
    logger.info(cfg)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, 'models', cfg.MODEL.NAME + '.py'),
        args.path_helper['ckpt_path'])

    # Datasets and dataloaders

    # The toy dataset is downloaded with 10 items per partition; remove the sample_size parameter to use the full toy dataset.
    asv_train, asv_dev, asv_eval = asv_toys(sample_size=10)


    train_dataset = asv_train #MNIST('mydata', transform=totensor, train=True, download=True)
    val_dataset = asv_dev #MNIST('mydata', transform=totensor, train=False, download=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    print(f'search.py: Train loader of {len(train_loader)} batches')
    print(f'Tot train set: {len(train_dataset)}')
    val_loader = torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    print(f'search.py: Val loader of {len(val_loader)} batches')
    print(f'Tot val set {len(val_dataset)}')
    test_dataset = asv_eval #MNIST('mydata', transform=totensor, train=False, download=True)
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
    architect = Architect(model, cfg)
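    # The architect presumably performs the DARTS-style bilevel update of the
    # architecture parameters on validation batches inside train(), which receives
    # both loaders.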
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, cfg.TRAIN.END_EPOCH, cfg.TRAIN.LR_MIN,
        last_epoch=last_epoch
    )

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH), desc='search progress'):
        model.train()

        genotype = model.genotype()
        logger.info('genotype = %s', genotype)

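        # Anneal the drop-path probability linearly with training progress,
        # reaching cfg.TRAIN.DROPPATH_PROB at the final epoch.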
        if cfg.TRAIN.DROPPATH_PROB != 0:
            model.drop_path_prob = cfg.TRAIN.DROPPATH_PROB * epoch / (cfg.TRAIN.END_EPOCH - 1)

        train(cfg, model, optimizer, train_loader, val_loader, criterion, architect, epoch, writer_dict)

        if epoch % cfg.VAL_FREQ == 0:
            # get threshold and evaluate on validation set
            acc = validate_identification(cfg, model, test_loader, criterion)

            # remember best acc@1 and save checkpoint
            is_best = acc > best_acc1
            best_acc1 = max(acc, best_acc1)

            # save
            logger.info('=> saving checkpoint to {}'.format(args.path_helper['ckpt_path']))
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
                'arch': model.arch_parameters(),
                'genotype': genotype,
                'path_helper': args.path_helper
            }, is_best, args.path_helper['ckpt_path'], 'checkpoint_{}.pth'.format(epoch))

        lr_scheduler.step(epoch)
Example #7
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # model and optimizer
    model = eval('resnet.{}(num_classes={})'.format(cfg.MODEL.NAME,
                                                    cfg.MODEL.NUM_CLASSES))
    model = model.cuda()
    optimizer = optim.Adam(
        model.net_parameters()
        if hasattr(model, 'net_parameters') else model.parameters(),
        lr=cfg.TRAIN.LR,
    )
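    # If the model exposes net_parameters(), only those are passed to the optimizer
    # (presumably so auxiliary or architecture parameters are excluded); otherwise
    # all parameters are optimized.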

    # Loss
    criterion = CrossEntropyLoss(cfg.MODEL.NUM_CLASSES).cuda()

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = os.path.join(args.load_path, 'Model',
                                       'checkpoint_best.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_eer = checkpoint['best_eer']
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_eer = 1.0
        last_epoch = -1
    logger.info(args)
    logger.info(cfg)
    logger.info("Number of parameters: {}".format(count_parameters(model)))

    # dataloader
    train_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                       cfg.DATASET.SUB_DIR,
                                       cfg.DATASET.PARTIAL_N_FRAMES)
    test_dataset_verification = VoxcelebTestset(Path(cfg.DATASET.DATA_DIR),
                                                cfg.DATASET.PARTIAL_N_FRAMES)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    test_loader_verification = torch.utils.data.DataLoader(
        dataset=test_dataset_verification,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        cfg.TRAIN.END_EPOCH,
        cfg.TRAIN.LR_MIN,
        last_epoch=last_epoch)

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH),
                      desc='train progress'):
        model.train()
        train_from_scratch(cfg, model, optimizer, train_loader, criterion,
                           epoch, writer_dict, lr_scheduler)
        if epoch % cfg.VAL_FREQ == 0:
            eer = validate_verification(cfg, model, test_loader_verification)

            # remember best acc@1 and save checkpoint
            is_best = eer < best_eer
            best_eer = min(eer, best_eer)

            # save
            logger.info('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_eer': best_eer,
                    'optimizer': optimizer.state_dict(),
                    'path_helper': args.path_helper
                }, is_best, args.path_helper['ckpt_path'],
                'checkpoint_{}.pth'.format(epoch))
        lr_scheduler.step(epoch)
Example #8
def adapt_gmn():

    # ==> gpu configuration
    ut.initialize_GPU(args)

    # ==> set up model path and log path.
    model_path, log_path = ut.set_path(args)

    # ==> import library
    import keras
    import data_loader
    import model_factory
    import data_generator

    # ==> get dataset information
    trn_config = data_loader.get_config(args)

    params = {'cg': trn_config, 'processes': 12, 'batch_size': args.batch_size}

    trn_gen, val_gen = data_generator.setup_generator(**params)

    # ==> load networks
    gmn = model_factory.two_stream_matching_networks(trn_config,
                                                     sync=False,
                                                     adapt=False)
    model = model_factory.two_stream_matching_networks(trn_config,
                                                       sync=False,
                                                       adapt=True)

    # ==> attempt to load pre-trained model
    if args.resume:
        if os.path.isfile(args.resume):
            model.load_weights(os.path.join(args.resume), by_name=True)
            print('==> successfully loading the model: {}'.format(args.resume))
        else:
            print("==> no checkpoint found at '{}'".format(args.resume))

    # ==> attempt to load pre-trained GMN
    elif args.gmn_path:
        if os.path.isfile(args.gmn_path):
            gmn.load_weights(os.path.join(args.gmn_path), by_name=True)
            print('==> successfully loading the model: {}'.format(
                args.gmn_path))
        else:
            print("==> no checkpoint found at '{}'".format(args.gmn_path))

    # ==> print model summary
    model.summary()

    # ==> transfer weights from gmn to new model (this step is slow, but can't seem to avoid it)
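    # Layers that are themselves Keras models (nested sub-models) are traversed so
    # each wrapped layer's weights can be copied by name into the adapted model.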
    for i, layer in enumerate(gmn.layers):
        if isinstance(layer, model.__class__):
            for l in layer.layers:
                weights = l.get_weights()
                if len(weights) > 0:
                    #print('{}'.format(l.name))
                    model.layers[i].get_layer(l.name).set_weights(weights)
        else:
            weights = layer.get_weights()
            if len(weights) > 0:
                #print('{}'.format(layer.name))
                model.get_layer(layer.name).set_weights(weights)

    # ==> set up callbacks, e.g. lr schedule, tensorboard, save checkpoint.
    normal_lr = keras.callbacks.LearningRateScheduler(ut.step_decay(args))
    tbcallbacks = keras.callbacks.TensorBoard(log_dir=log_path,
                                              histogram_freq=0,
                                              write_graph=False,
                                              write_images=False)
    callbacks = [
        keras.callbacks.ModelCheckpoint(os.path.join(model_path, 'model.h5'),
                                        monitor='val_loss',
                                        save_best_only=True,
                                        mode='min'), normal_lr, tbcallbacks
    ]

    model.fit_generator(trn_gen,
                        steps_per_epoch=600,
                        epochs=args.epochs,
                        validation_data=val_gen,
                        validation_steps=100,
                        callbacks=callbacks,
                        verbose=1)
Example #9
File: predict.py  Project: segwitt/gmn2
def predict(imgdict):

    # ==> gpu configuration
    ut.initialize_GPU(args)

    # ==> set up model path and log path.
    model_path, log_path = ut.set_path(args)

    # ==> import library
    import keras
    import data_loader
    import model_factory
    import data_generator

    # ==> get dataset information
    # trn_config = data_loader.get_config(args)

    # params = {'cg': trn_config,
    #           'processes': 12,
    #           'batch_size': args.batch_size
    #           }

    # trn_gen, val_gen = data_generator.setup_generator(**params)

    # ==> load networks

    class C(object):
        """docstring for C"""
        patchdims = (64, 64, 3)
        imgdims = (800, 800, 3)

        def __init__(self, arg):
            super(C, self).__init__()
            self.arg = arg

    gmn = model_factory.two_stream_matching_networks(C,
                                                     sync=False,
                                                     adapt=False)
    # model = model_factory.two_stream_matching_networks(C, sync=False, adapt=True)

    # ==> attempt to load pre-trained model
    if args.resume:
        if os.path.isfile(args.resume):
            gmn.load_weights(os.path.join(args.resume), by_name=True)  # `model` is not built in this function; load into gmn
            print('==> successfully loading the model: {}'.format(args.resume))
        else:
            print("==> no checkpoint found at '{}'".format(args.resume))

    # ==> attempt to load pre-trained GMN
    elif args.gmn_path:
        if os.path.isfile(args.gmn_path):
            gmn.load_weights(os.path.join(args.gmn_path), by_name=True)
            print('==> successfully loading the model: {}'.format(
                args.gmn_path))
        else:
            print("==> no checkpoint found at '{}'".format(args.gmn_path))

    # ==> print model summary
    # model.summary()

    # ==> transfer weights from gmn to new model (this step is slow, but can't seem to avoid it)
    # for i,layer in enumerate(gmn.layers):
    #     if isinstance(layer, model.__class__):
    #         for l in layer.layers:
    #             weights = l.get_weights()
    #             if len(weights) > 0:
    #                 #print('{}'.format(l.name))
    #                 model.layers[i].get_layer(l.name).set_weights(weights)
    #     else:
    #         weights = layer.get_weights()
    #         if len(weights) > 0:
    #             #print('{}'.format(layer.name))
    #             model.get_layer(layer.name).set_weights(weights)

    return gmn.predict(imgdict)  #model
Example #10
import utils

utils.set_path(('..', ))
from RelationalGraph import *
from MLNPotential import *
from Potential import QuadraticPotential, TablePotential, HybridQuadraticPotential
from EPBPLogVersion import EPBP
from GaBP import GaBP
from OneShot import OneShot, LiftedOneShot
from NPVI import NPVI, LiftedNPVI
from CompressedGraphSorted import CompressedGraphSorted
import numpy as np
import time
from copy import copy

seed = 0
utils.set_seed(seed)

# KF stuff
from KalmanFilter import KalmanFilter
from Graph import *
import scipy.io

cluster_mat = scipy.io.loadmat(
    'Data/RKF/cluster_NcutDiscrete.mat')['NcutDiscrete']
well_t = scipy.io.loadmat('Data/RKF/well_t.mat')['well_t']
ans = scipy.io.loadmat('Data/RKF/LRKF_cycle.mat')['res']
param = scipy.io.loadmat('Data/RKF/LRKF_cycle.mat')['param']
print(well_t.shape)

idx = np.where(cluster_mat[:, 1] == 1)[0]
Example #11
def main():
    args = parse_args()
    update_config(cfg, args)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # Loss
    criterion = CrossEntropyLoss(cfg.MODEL.NUM_CLASSES).cuda()

    # load arch
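    # args.text_arch is expected to be the string form of a Genotype produced by the
    # search stage; eval() turns it back into a Genotype object (trusted input assumed).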
    genotype = eval(args.text_arch)

    model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES,
                    cfg.MODEL.LAYERS, genotype)
    model = model.cuda()

    optimizer = optim.Adam(
        model.parameters(),
        lr=cfg.TRAIN.LR,
        weight_decay=cfg.TRAIN.WD,
    )

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = os.path.join(args.load_path, 'Model',
                                       'checkpoint_best.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_acc1 = checkpoint['best_acc1']
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkloggpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs_scratch', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_acc1 = 0.0
        last_epoch = -1
    logger.info(args)
    logger.info(cfg)
    logger.info(f"selected architecture: {genotype}")
    logger.info("Number of parameters: {}".format(count_parameters(model)))

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(os.path.join(this_dir, './models', cfg.MODEL.NAME + '.py'),
                 args.path_helper['ckpt_path'])

    # dataloader
    train_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                       cfg.DATASET.PARTIAL_N_FRAMES, 'train')
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    test_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                      cfg.DATASET.PARTIAL_N_FRAMES,
                                      'test',
                                      is_test=True)
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        cfg.TRAIN.END_EPOCH,
        cfg.TRAIN.LR_MIN,
        last_epoch=last_epoch)

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH),
                      desc='train progress'):
        model.train()
        model.drop_path_prob = cfg.MODEL.DROP_PATH_PROB * epoch / cfg.TRAIN.END_EPOCH

        train_from_scratch(cfg, model, optimizer, train_loader, criterion,
                           epoch, writer_dict)

        if epoch % cfg.VAL_FREQ == 0 or epoch == cfg.TRAIN.END_EPOCH - 1:
            acc = validate_identification(cfg, model, test_loader, criterion)

            # remember best acc@1 and save checkpoint
            is_best = acc > best_acc1
            best_acc1 = max(acc, best_acc1)

            # save
            logger.info('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    'path_helper': args.path_helper,
                    'genotype': genotype,
                }, is_best, args.path_helper['ckpt_path'],
                'checkpoint_{}.pth'.format(epoch))

        lr_scheduler.step(epoch)
Example #12
    path_plot = os.getcwd()

path_flexce_top = os.path.abspath(join(path_plot, '../..'))
path_flexce = join(path_flexce_top, 'flexce')
path_fileio = join(path_flexce_top, 'flexCE', 'fileio')
path_plots = join(path_flexce_top, 'plots')
# ---------------------

sys.path.insert(0, path_flexce)
import utils
import plot.utils as putils
from fileio import txt_io, cfg_io

default_config_path = join(path_plots, 'config')
default_output_path = join(path_flexce_top, 'output')
fin, path_config = utils.set_path(sys.argv[1], default_config_path)

try:
    stem = path_config.split('config/')[1]
except IndexError:
    stem = ''

path_output = join(default_output_path, stem)

path_plot_out = utils.substitute_dir_in_path(path_config, 'config', 'plots')
if not os.path.isdir(path_plot_out):
    os.makedirs(path_plot_out)

# Read config file
cfg = cfg_io.read_plot_config(join(path_config, fin))
colors = putils.get_colors(cfg)
Example #13
    txt_write(path_out, sim_id, gal, abund)


if __name__ == '__main__':

    path_flexce = join(os.path.abspath(os.path.dirname(__file__)), '')
    path_flexce_root = os.path.abspath(join(path_flexce, '..'))
    path_data = join(path_flexce, 'data')

    argv = None
    if argv is None:
        argv = sys.argv

    try:
        default_config_path = join(path_flexce_root, 'config')
        fname, path_config = utils.set_path(argv[1], default_config_path)
    except IndexError:
        path_config = join(os.getenv('HOME'), 'flexCE', 'examples')
        fname = 'sim0.cfg'
        print('\nUsing default parameters in \n{}'.format(join(path_config, fname)))

    file_in = join(path_config, fname)

    # TODO Add try...except to handle user-defined output path
    path_out = utils.substitute_dir_in_path(path_config, 'config', 'output')

    (simulation_id, yld_args, initialize_args, mass_bins_args, snia_dtd_args,
     inflows_args, outflows_args, warmgasres_args, sf_args) = \
        read_sim_cfg(file_in)
    mass_bins = utils.define_mass_bins(**mass_bins_args)
    ylds = utils.load_yields(path_data, yld_args, mass_bins)
Example #14
"""Run example scripts with the various PEERs running on the pydicom Test
Server VM."""

import test_config
import os
from utils import set_path

set_path()

script_dir = '../examples'


# storescu
cmd_template = '%s/storescu.py -aet %s -aec %s -implicit %s %d %s'
for pp in test_config.peers:
    files = '/data/PatientsTests/0008-Prostate_1/*'
    cmd = cmd_template % (script_dir, test_config.AET,
                          pp['aet'], pp['host'], pp['port'], files)
    print(cmd)
    os.system(cmd)
Example #15
    def train(
        self,
        model,
        data_iterators,
        optimizer,
        tb_prefix="exp/",
        prefix="neural_network",
    ):
        sup_losses = [utils.AverageMeter() for _ in range(len(self.criterion) + 1)]
        vat_losses = utils.AverageMeter()
        perfs = [utils.AverageMeter() for _ in range(3)]
        tb_dir = self.args.tensorboard_dir
        if not tb_dir.endswith("/"):
            tb_dir += "/"
        tb_dir += tb_prefix
        writer = utils.set_tensorborad_writer(tb_dir)

        model.train()

        criterion = self.criterion
        score_param_index = evaluator.get_scoring_func_param_index(self.target_labels)
        weight = self.weight
        if weight is None:
            weight = [1.0] * len(criterion)
        assert len(weight) == len(criterion)

        tbIndex = 0

        best_val_metric = 0.0

        # for k in tqdm(range(self.init_iter, self.args.iters)):
        for k in range(self.init_iter, self.args.iters):

            # reset
            if k > 0 and k % self.args.log_interval == 0:
                tbIndex += 1
                val_mean_loss, val_metrics, _ = self.eval(
                    model, data_iterators, key="val"
                )

                if val_metrics[0] > best_val_metric:
                    best_val_metric = val_metrics[0]
                    filename = (
                        self.args.checkpoint_dir + prefix + "_{}.pt".format("BestModel")
                    )
                    utils.set_path(filename)
                    utils.save_checkpoint(model, k, filename, optimizer, self.vat_loss)

                # writer.add_scalar("Train/Loss", sup_losses[0].avg, tbIndex)
                # writer.add_scalar("Train/VAT_Loss", vat_losses.avg, tbIndex)

                # Log the loss terms from the previous iteration
                # (loss = supervised_loss + reg_loss + self.args.alpha * lds).
                writer.add_scalar("Train/total_loss", loss, tbIndex)
                writer.add_scalar("Train/supervised_loss", supervised_loss, tbIndex)
                writer.add_scalar("Train/reg_loss", reg_loss, tbIndex)
                writer.add_scalar("Train/lds", self.args.alpha * lds, tbIndex)

                writer.add_scalar("Valid/Loss", val_mean_loss, tbIndex)

                writer.add_scalar("Train_perf/f1_score", perfs[0].avg, tbIndex)
                writer.add_scalar("Valid/f1_score", val_metrics[0], tbIndex)
                writer.add_scalar("Train_perf/accuracy", perfs[1].avg, tbIndex)
                writer.add_scalar("Valid/accuracy", val_metrics[1], tbIndex)

                train_metrics_avg = [p.avg for p in perfs]
                train_metrics_val = [p.val for p in perfs]

                print(
                    "Iteration: {}\t Loss {:.4f} ({:.4f})\t".format(
                        k, sup_losses[0].val, sup_losses[0].avg
                    ),
                    "VATLoss {:.4f} ({:.4f})\tValid_Loss {:.4f}".format(
                        vat_losses.val, vat_losses.avg, val_mean_loss
                    ),
                    "Train_Metrics: {}\t Train_Metrics_AVG {}\t".format(
                        train_metrics_val, train_metrics_avg
                    ),
                    "Valid_Metrics: {}\t".format(val_metrics),
                    "Best Perf: {}\t".format(best_val_metric),
                )
                print("-" * 80)
                for a in sup_losses:
                    a.reset()
                for a in perfs:
                    a.reset()
                vat_losses.reset()

                # re-activate train mode
                model.train()

            x_l, y_l = next(data_iterators["labeled"])
            if not isinstance(y_l, (list, tuple)):
                y_l = [y_l]
            x_ul = next(data_iterators["unlabeled"])

            x_l, y_l = x_l.to(self.device), [t.to(self.device) for t in y_l]
            if not isinstance(x_ul, (list, tuple)):
                x_ul = x_ul.to(self.device)
            else:
                x_ul = [t.to(self.device) for t in x_ul]

            optimizer.zero_grad()

            lds = self.vat_loss(model, x_ul)
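            # lds = local distributional smoothness, the VAT regularization term
            # computed from the unlabeled batch.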

            if isinstance(x_ul, (list, tuple)):
                x_ul = x_ul[0]
            # print('')
            # print('LDS: ', lds)
            outputs = model(x_l)
            if not isinstance(outputs, (list, tuple)):
                outputs = [outputs]

            reg_loss = 0.0
            # print('is_entropy_based: ', self.is_entropy_based)
            if self.is_entropy_based:
                outputs_ul = model(x_ul)
                if not isinstance(outputs_ul, (list, tuple)):
                    outputs_ul = [outputs_ul]
                supervised_reg_losses = [
                    w * (0.0 if c is None else c(o))
                    for c, o, w in zip(self.entropy, outputs, self.weight)
                ]
                # print('supervised_reg_losses: ', supervised_reg_losses)
                unsupervised_reg_losses = [
                    w * (0.0 if c is None else c(o))
                    for c, o, w in zip(self.entropy, outputs_ul, self.weight)
                ]
                # print('unsupervised_reg_losses: ', unsupervised_reg_losses)

                # reg_losses = [
                #     (a+b)/(x_ul.size(0) + x_l.size(0))
                #     for a,b in zip(supervised_reg_losses, unsupervised_reg_losses)
                # ]
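                # Average each entropy term per example, then take an alpha-weighted
                # mean of the supervised and unsupervised terms.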
                reg_losses = [
                    ((a / (x_l.size(0))) + self.args.alpha * (b / (x_ul.size(0))))
                    / (1.0 + self.args.alpha)
                    for a, b in zip(supervised_reg_losses, unsupervised_reg_losses)
                ]

                # print('reg_losses: ', reg_losses)
                reg_loss = sum(reg_losses)
                # print('reg_loss: ', reg_loss)

            supervised_losses = [
                w * (c(o, gt) if o.size(1) == 1 else c(o, gt.squeeze(1)))
                for c, o, gt, w in zip(criterion, outputs, y_l, weight)
            ]
            supervised_loss = sum(supervised_losses)

            # print('supervised_losses: ', supervised_losses)
            # print('supervised_loss: ', supervised_loss)

            treeId_pred, treeId_true = None, None
            if score_param_index[0] is not None:
                i = score_param_index[0]
                _, pred_classes = torch.max(outputs[i], dim=1)
                treeId_true = y_l[i].view(-1).tolist()
                treeId_pred = pred_classes.view(-1).tolist()
                treeId_pred = np.array(treeId_pred, dtype=np.int32)
                treeId_true = np.array(treeId_true, dtype=np.int32)

            loss = supervised_loss + reg_loss + self.args.alpha * lds
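            # total objective: supervised losses + entropy regularization + alpha-weighted VAT term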
            # loss = supervised_loss # + reg_loss + self.args.alpha * lds
            loss.backward()
            optimizer.step()

            if hasattr(self.vat_loss, "update_ema_variables"):
                self.vat_loss.update_ema_variables(model, self.args.ema_decay, k)

            # print('loss_final: ', loss)

            metrics = scoreF.scorePerformance(treeId_pred, treeId_true)

            for i in range(len(supervised_losses)):
                sup_losses[i + 1].update(supervised_losses[i].item(), x_l.shape[0])
            sup_losses[0].update(supervised_loss.item(), x_l.shape[0])
            for i in range(len(metrics)):
                perfs[i].update(metrics[i], x_l.shape[0])
            vat_losses.update(lds.item(), x_ul.shape[0])

            if k > 0 and k % self.args.chkpt_freq == 0:
                filename = self.args.checkpoint_dir + prefix + "_{}.pt".format(k)
                utils.set_path(filename)
                utils.save_checkpoint(model, k, filename, optimizer, self.vat_loss)

        filename = self.args.checkpoint_dir + prefix + "_{}.pt".format(self.args.iters)
        utils.set_path(filename)
        utils.save_checkpoint(model, self.args.iters, filename, optimizer)
Example #16
import utils

utils.set_path(('..', '../gibbs'))
from RelationalGraph import *
from MLNPotential import *
from Potential import QuadraticPotential, TablePotential, HybridQuadraticPotential
from EPBPLogVersion import EPBP
from OneShot import OneShot, LiftedOneShot
from NPVI import NPVI, LiftedNPVI
from CompressedGraphSorted import CompressedGraphSorted
import numpy as np
import time
from copy import copy

seed = 1
utils.set_seed(seed)

from hybrid_gaussian_mrf import HybridGaussianSampler
from hybrid_gaussian_mrf import convert_to_bn, block_gibbs_sample, get_crv_marg, get_drv_marg, \
    get_rv_marg_map_from_bn_params
import sampling_utils

from utils import kl_continuous_logpdf

num_x = 2
num_y = 2

X = []
for x in range(num_x):
    X.append(f'x{x}')
Y = []
Example #17
def main():
    args = parse_args()
    update_config(cfg, args)
    # assert args.text_arch

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # Loss
    criterion = FocalLoss().cuda()

    # load arch
    genotype = eval(
        "Genotype(normal=[('dil_conv_5x5', 1), ('dil_conv_3x3', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('sep_conv_3x3', 2), ('dil_conv_3x3', 2), ('max_pool_3x3', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('dil_conv_5x5', 2), ('max_pool_3x3', 1), ('dil_conv_5x5', 3), ('dil_conv_3x3', 2), ('dil_conv_5x5', 4), ('dil_conv_5x5', 2)], reduce_concat=range(2, 6))"
    )

    model = IR_50(cfg.MODEL.NUM_CLASSES)
    # model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES, cfg.MODEL.LAYERS, genotype)
    model = model.cuda()

    # optimizer = optim.Adam(
    #     model.parameters(),
    #     lr=cfg.TRAIN.LR
    # )
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = args.load_path
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_eer = checkpoint['best_eer']
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.path_helper = checkpoint['path_helper']

        # begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        # last_epoch = -1
        # best_eer = 1.0
        # del checkpoint['state_dict']['classifier.weight']
        # del checkpoint['state_dict']['classifier.bias']
        # model.load_state_dict(checkpoint['state_dict'], strict=False)
        # # best_eer = checkpoint['best_eer']
        # # optimizer.load_state_dict(checkpoint['optimizer'])
        # exp_name = args.cfg.split('/')[-1].split('.')[0]
        # args.path_helper = set_path('/content/drive/My Drive/zalo/AutoSpeech/logs_scratch', exp_name)

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkloggpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs_scratch', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_eer = 1.0
        last_epoch = -1
    logger.info(args)
    logger.info(cfg)
    logger.info(f"selected architecture: {genotype}")
    logger.info("Number of parameters: {}".format(count_parameters(model)))

    # dataloader
    train_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                       cfg.DATASET.SUB_DIR,
                                       cfg.DATASET.PARTIAL_N_FRAMES)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    test_dataset_verification = VoxcelebTestset(Path(cfg.DATASET.DATA_DIR),
                                                cfg.DATASET.PARTIAL_N_FRAMES)
    test_loader_verification = torch.utils.data.DataLoader(
        dataset=test_dataset_verification,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
    # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    #     optimizer, cfg.TRAIN.END_EPOCH, cfg.TRAIN.LR_MIN,
    #     last_epoch=last_epoch
    # )

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH),
                      desc='train progress'):
        model.train()
        model.drop_path_prob = cfg.MODEL.DROP_PATH_PROB * epoch / cfg.TRAIN.END_EPOCH

        train_from_scratch(cfg, model, optimizer, train_loader, criterion,
                           epoch, writer_dict)

        if epoch == 210 or epoch == 240 or epoch == 270:
            schedule_lr(optimizer)

        if epoch % cfg.VAL_FREQ == 0 or epoch == cfg.TRAIN.END_EPOCH - 1:
            # eer = validate_verification(cfg, model, test_loader_verification)

            # # remember best acc@1 and save checkpoint
            # is_best = eer < best_eer
            # best_eer = min(eer, best_eer)

            # save
            logger.info('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            print('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_eer': best_eer,
                    'optimizer': optimizer.state_dict(),
                    'path_helper': args.path_helper
                }, True, args.path_helper['ckpt_path'],
                'checkpoint_{}.pth'.format(epoch))