Example #1
import os

# parse_args, load_val, darknet, load_checkpoint, validate and the config
# module are project-local and defined elsewhere in the repository.
def main():
    args = parse_args()
    loader = load_val(config.val_path, args, distributed=False)

    model = darknet().eval()
    #model = torchvision.models.resnet101( pretrained=True ).eval()

    checkpoint_path = os.path.join(config.checkpoint_path,
                                   config.checkpoint_name)
    load_checkpoint(model, checkpoint_path)
    validate(loader, model, args)
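
load_checkpoint is project-local; a minimal sketch of what it presumably does, assuming the file holds either a bare state_dict or a dict wrapping one under a 'state_dict' key:

import torch

def load_checkpoint(model, checkpoint_path):
    # Load onto the CPU first so this also works on machines without a GPU.
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # Unwrap checkpoints saved as {"state_dict": ...}; pass bare dicts through.
    state_dict = checkpoint.get("state_dict", checkpoint)
    model.load_state_dict(state_dict)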
Example #2
import datetime
import os

import utils
from initializers.spn_initializer import SPNInitializer, ShapeAgnosticLoad
from metrics.base import STNCrossEntropy, STNAccuracy
from networks.svhn import SVHNMultiLineResNetNetwork
from utils.create_gif import make_gif
from utils.create_video import make_video
from utils.datatypes import Size

from operations.debug import *
from operations.ones import *
from operations.disable_shearing import *
from utils.plot_log import LogPlotter



if __name__ == '__main__':
    parser = utils.parse_args()
    args = parser.parse_args()

    if args.send_bboxes and args.ip is None:
        parser.print_usage()
        raise ValueError("You must specify an upstream ip if you want to send the bboxes of each iteration")

    time = datetime.datetime.now().isoformat()
    args.log_dir = os.path.join(args.log_dir, "{}_{}".format(time, args.log_name))
    args.log_file = os.path.join(args.log_dir, 'log')

    image_size = Size(width=200, height=200)
    source_shape = (args.batch_size, 1, image_size.height, image_size.width)
    target_shape = Size(width=50, height=50)

    # adjustable network parameters
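
The snippet is cut off here. Size, used above, comes from the project's utils.datatypes; given the keyword construction and the .width/.height attribute access, it is presumably a simple named tuple, along the lines of this hypothetical reconstruction:

from collections import namedtuple

# Immutable (width, height) pair with named fields.
Size = namedtuple('Size', ['width', 'height'])

image_size = Size(width=200, height=200)
print(image_size.height)  # 200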
Example #3
import matplotlib.pyplot as plt
import torch
from torchvision.models import resnet18

# parse_args, HyperParams and adjust_learning_rate are project-local
# helpers defined elsewhere in the repository.


dataset_size = 1281216  # roughly the ImageNet-1k training set size

def plot_lr(optim, args, hyper):
    lr_hist = []

    batch_size = hyper.batch_size
    n_per_epoch = dataset_size // batch_size
    print("number of iterations per epoch: {}".format(n_per_epoch))

    start_epoch = args.start_epoch - 1
    end_epoch = start_epoch + args.epochs

    for epoch in range(start_epoch, end_epoch):
        for i in range(n_per_epoch):
            niter = epoch * n_per_epoch + i
            lr = adjust_learning_rate(optim, niter, hyper)
            lr_hist.append(lr)

    index = list(range(n_per_epoch * args.epochs))
    plt.plot(index, lr_hist)
    plt.show()


# Create a dummy optimizer and plot the schedule.
args = parse_args()
hyper = HyperParams(args.__dict__)
optim = torch.optim.SGD(resnet18().parameters(), lr=args.base_lr)
plot_lr(optim, args, hyper)
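
adjust_learning_rate is not shown in this example; a minimal sketch, assuming a conventional ImageNet-style step schedule (10x decay every 30 epochs) and that hyper exposes base_lr and batch_size, could look like this:

def adjust_learning_rate(optim, niter, hyper):
    # Hypothetical step schedule: recover the epoch from the global
    # iteration counter, then decay the base rate by 10x every 30 epochs.
    n_per_epoch = dataset_size // hyper.batch_size
    epoch = niter // n_per_epoch
    lr = hyper.base_lr * (0.1 ** (epoch // 30))
    for param_group in optim.param_groups:
        param_group['lr'] = lr
    return lr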
Example #4
def main(args):
    # model, batch_iter, optimizer and target_vocab are created earlier in
    # this function; the snippet is truncated at the top.
    if args.model_file is not None:
        if os.path.exists(args.model_file):
            print("Loading model from file: {}".format(args.model_file))
            model.load_state_dict(torch.load(args.model_file))
        else:
            print("Model file does not exist, training from scratch...")

    # Check for CUDA usage
    device = torch.device("cuda:0" if args.cuda else "cpu")

    if not args.e:
        train_net(
            model,
            batch_iter,
            args.epochs,
            optimizer,
            args.val_freq,
            device=device,
            print_interval=args.print_interval,
        )

        if args.model_file is not None:
            torch.save(model.cpu().state_dict(), args.model_file)
    else:
        model = model.to(device)
        eval_net(model, batch_iter, args.val_freq, target_vocab, device=device)


if __name__ == "__main__":
    main(parse_args())
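
Note that model.cpu() above moves the parameters in place, which is fine here only because saving is the last step. A device-agnostic alternative, reusing model, args and device from the snippet, is to save directly and remap at load time:

import torch

# Save without relocating the model; the tensors in state_dict()
# stay on whatever device the model currently uses.
torch.save(model.state_dict(), args.model_file)

# At load time, remap the saved tensors onto the available device.
state_dict = torch.load(args.model_file, map_location=device)
model.load_state_dict(state_dict)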
Example #5
import gc
from os import makedirs
from os.path import exists, join
from time import time

from gensim.models.doc2vec import Doc2Vec

# Documents, EpochLogger, EpochSaver, init_logging, log_args, parse_args,
# SMPL_PATH and EMB_PATH are project-local and defined elsewhere.


def main():
    # --- argument parsing ---
    (model_name, epochs, min_count, cores, checkpoint_every, cache_in_memory,
     lowercase, _, args) = parse_args(default_model_name='d2v',
                                      default_epochs=20)

    # --- init logging ---
    logger = init_logging(name=model_name,
                          basic=True,
                          to_file=True,
                          to_stdout=False)
    log_args(logger, args)

    input_dir = join(SMPL_PATH, 'dewiki')
    model_dir = join(EMB_PATH, model_name)
    if not exists(model_dir):
        makedirs(model_dir)
    logger.info('model dir: ' + model_dir)

    t0 = time()
    documents = Documents(input_dir=input_dir,
                          logger=logger,
                          lowercase=lowercase)
    if cache_in_memory:
        documents = list(documents)
    gc.collect()

    # Model initialization
    logger.info('Initializing new model')
    model = Doc2Vec(
        vector_size=300,
        window=15,
        min_count=min_count,
        sample=1e-5,
        negative=5,
        hs=0,
        dm=0,
        dbow_words=1,
        dm_concat=0,
        seed=42,
        epochs=epochs,
        workers=cores,
    )
    logger.info('Building vocab')
    model.build_vocab(documents)

    # Model Training
    epoch_saver = EpochSaver(model_name, model_dir, checkpoint_every)
    epoch_logger = EpochLogger(logger)

    logger.info('Training {:d} epochs'.format(epochs))
    model.train(
        documents,
        total_examples=model.corpus_count,
        epochs=model.epochs,
        report_delay=60,
        callbacks=[epoch_logger, epoch_saver],
    )

    # saving model
    file_path = join(model_dir, model_name)
    logger.info('Writing model to ' + file_path)
    model.callbacks = ()
    model.save(file_path)

    t1 = int(time() - t0)
    logger.info("all done in {:02d}:{:02d}:{:02d}".format(
        t1 // 3600, (t1 // 60) % 60, t1 % 60))
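
EpochLogger and EpochSaver are project-local, but gensim supports exactly this kind of hook through gensim.models.callbacks.CallbackAny2Vec; a minimal sketch, with the constructor signatures inferred from the calls above:

from os.path import join
from gensim.models.callbacks import CallbackAny2Vec

class EpochLogger(CallbackAny2Vec):
    # Log the start of each training epoch.
    def __init__(self, logger):
        self.logger = logger
        self.epoch = 0

    def on_epoch_begin(self, model):
        self.epoch += 1
        self.logger.info('epoch {:d} started'.format(self.epoch))

class EpochSaver(CallbackAny2Vec):
    # Checkpoint the model every checkpoint_every epochs.
    def __init__(self, model_name, model_dir, checkpoint_every):
        self.model_name = model_name
        self.model_dir = model_dir
        self.checkpoint_every = checkpoint_every
        self.epoch = 0

    def on_epoch_end(self, model):
        self.epoch += 1
        if self.epoch % self.checkpoint_every == 0:
            path = join(self.model_dir,
                        '{}_epoch{:d}'.format(self.model_name, self.epoch))
            model.save(path)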
Example #6
import gc
from os import makedirs
from os.path import exists, join
from time import time

# gensim < 4.0 API: the `size` and `iter` keyword arguments used below were
# renamed to `vector_size` and `epochs` in gensim 4.0.
from gensim.models import FastText, Word2Vec

# Sentences, EpochLogger, EpochSaver, init_logging, log_args, parse_args,
# SMPL_PATH and EMB_PATH are project-local and defined elsewhere.


def main():
    # --- argument parsing ---
    (
        model_name, epochs, min_count, cores, checkpoint_every,
        cache_in_memory, lowercase, fasttext, args
    ) = parse_args(default_model_name='w2v_default', default_epochs=100)

    # --- init logging ---
    logger = init_logging(name=model_name, basic=True, to_file=True, to_stdout=False)
    log_args(logger, args)

    input_dir = join(SMPL_PATH, 'dewiki')
    model_dir = join(EMB_PATH, model_name)
    if not exists(model_dir):
        makedirs(model_dir)
    logger.info('model dir: ' + model_dir)

    t0 = time()
    if cache_in_memory:
        # needs approx. 25GB of RAM
        logger.info('cache data in memory')
        sentences = [s for s in Sentences(input_dir, logger, lowercase=lowercase)]
    else:
        sentences = Sentences(input_dir, logger, use_file_cache=True, lowercase=lowercase)
    gc.collect()

    # Model initialization
    logger.info('Initializing new model')
    if fasttext:
        model = FastText(
            size=300,
            window=5,
            min_count=min_count,
            sample=1e-5,
            negative=5,
            sg=1,
            seed=42,
            iter=epochs,
            workers=cores,
            min_n=3,
            max_n=6,
        )
    else:
        model = Word2Vec(
            size=300,
            window=5,
            min_count=min_count,
            sample=1e-5,
            negative=5,
            sg=1,
            seed=42,
            iter=epochs,
            workers=cores,
        )
    logger.info('Building vocab')
    model.build_vocab(sentences, progress_per=100_000)

    # Model Training
    epoch_saver = EpochSaver(model_name, model_dir, checkpoint_every)
    epoch_logger = EpochLogger(logger)

    logger.info('Training {:d} epochs'.format(epochs))
    model.train(
        sentences,
        total_examples=model.corpus_count,
        epochs=model.epochs,
        report_delay=60,
        callbacks=[epoch_logger, epoch_saver],
    )

    # saving model
    file_path = join(model_dir, model_name)
    logger.info('Writing model to ' + file_path)
    model.callbacks = ()
    model.save(file_path)

    t1 = int(time() - t0)
    logger.info("all done in {:02d}:{:02d}:{:02d}".format(t1//3600, (t1//60) % 60, t1 % 60))
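
Sentences is likewise project-local; gensim only requires a restartable iterable of token lists (compare gensim.models.word2vec.LineSentence), so a minimal sketch, ignoring the use_file_cache option, might be:

import os

class Sentences:
    # Restartable iterator over the whitespace-tokenized lines of
    # every file in input_dir.
    def __init__(self, input_dir, logger, lowercase=False, use_file_cache=False):
        self.input_dir = input_dir
        self.logger = logger
        self.lowercase = lowercase  # use_file_cache is ignored in this sketch

    def __iter__(self):
        for file_name in sorted(os.listdir(self.input_dir)):
            self.logger.info('reading ' + file_name)
            path = os.path.join(self.input_dir, file_name)
            with open(path, encoding='utf-8') as fp:
                for line in fp:
                    if self.lowercase:
                        line = line.lower()
                    yield line.split()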
Example #7
import datetime
import os

import utils
from data_io.file_iter import FileBasedIter
from data_io.lstm_iter import LSTMIter, InitStateLSTMIter
from initializers.spn_initializer import SPNInitializer, ShapeAgnosticLoad
from metrics.base import STNCrossEntropy, STNAccuracy
from networks.svhn import SVHNMultiLineResNetNetwork
from utils.create_gif import make_gif
from utils.create_video import make_video
from utils.datatypes import Size

from operations.debug import *
from operations.ones import *
from operations.disable_shearing import *
from utils.plot_log import LogPlotter

if __name__ == '__main__':
    parser = utils.parse_args()
    args = parser.parse_args()

    if args.send_bboxes and args.ip is None:
        parser.print_usage()
        raise ValueError(
            "You must specify an upstream ip if you want to send the bboxes of each iteration"
        )

    time = datetime.datetime.now().isoformat()
    args.log_dir = os.path.join(args.log_dir,
                                "{}_{}".format(time, args.log_name))
    args.log_file = os.path.join(args.log_dir, 'log')

    image_size = Size(width=200, height=200)
    source_shape = (args.batch_size, 1, image_size.height, image_size.width)