# add BioDL package to my path
import sys
sys.path.insert(0, '/home/horcrux/BioDL/')

# and import all the stuff
from pathlib import Path
from data import *
#from distributed import *
from fastai.callbacks import *
from datetime import datetime
import gc

# trace GPU memory usage during training
from fastai.utils.mem import GPUMemTrace
mtrace = GPUMemTrace()

#from pynvml import *
#nvmlInit()
#handle = nvmlDeviceGetHandleByIndex(0)
#info = nvmlDeviceGetMemoryInfo(handle)

#from fastai.distributed import *
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument("--local_rank", type=int)
parser.add_argument("--n_cpus", type=int)
args = parser.parse_args()
#torch.cuda.set_device(args.local_rank)
#torch.distributed.init_process_group(backend='nccl', init_method='env://')

# locations for checkpoints, training logs and data
path = Path('./')
model_path_base = '/home/horcrux/TrainLM_fastlr/models/train_LM_round'
model_dir = Path('/home/horcrux/TrainLM_fastlr/models/lm/')
log_path = Path('/home/horcrux/TrainLM_fastlr/train_logs/log_train_LM')
data_path = Path('/home/horcrux/data')
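# Note: a minimal usage sketch, not part of the original script. It assumes the
# fastai v1 GPUMemTrace API, where an instance reports the GPU memory delta and
# peak observed since creation, and the class can also act as a context manager.
mtrace.report()        # print memory used/peaked so far, e.g. right after the imports
with GPUMemTrace():    # trace a specific block and report on exit
    gc.collect()       # stand-in for the real work, e.g. building the databunch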
import argparse
import time
import math
import numpy as np
import torch
import torch.nn as nn

import data
import model
from utils import batchify, get_batch, repackage_hidden
from fastai.utils.mem import GPUMemTrace
mtrace = GPUMemTrace()

parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn/',
                    help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
                    help='type of recurrent net (LSTM, QRNN, GRU)')
parser.add_argument('--emsize', type=int, default=400,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1150,
                    help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3,
                    help='number of layers')
parser.add_argument('--lr', type=float, default=30,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
                    help='upper epoch limit')
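# For reference: repackage_hidden (imported from utils above) conventionally detaches
# the recurrent hidden state from the computation graph so backprop-through-time is
# truncated at batch boundaries. The helper below is a sketch of that common pattern
# under a hypothetical name, not necessarily the exact code in utils.py.
def repackage_hidden_sketch(h):
    """Detach hidden states (a tensor or nested tuple of tensors) from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden_sketch(v) for v in h)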