Example #1
# flag name '-gpus' is inferred from the help text; the original opening of this call is cut off in the listing
parser.add_argument('-gpus',
                    type=int,
                    help="Use CUDA on the listed devices.")
parser.add_argument('-restore',
                    default='',
                    type=str,
                    help="restore checkpoint")
parser.add_argument('-seed', type=int, default=1234, help="Random seed")
parser.add_argument('-notrain',
                    default=False,
                    action='store_true',
                    help="train or not")
opt = parser.parse_args()
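# read the experiment config named by -config and fix the random seed for reproducibility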
config = utils.read_config(opt.config)
torch.manual_seed(opt.seed)

print('#Start:', utils.format_time(time.localtime()))

# checkpoint
if opt.restore:
    print('loading checkpoint...\n')
    checkpoints = torch.load(opt.restore)
    config = checkpoints['config']
    threshold = checkpoints['threshold']
else:
    threshold = 0.5

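# supply defaults for fields that the config file may not define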
if 'train_batch_size' not in config:
    config.train_batch_size = config.batch_size
    config.test_batch_size = config.batch_size
    config.load_emb = False
# cuda
# optim.set_parameters(list(model.parameters()))

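# optional learning-rate schedule: StepLR multiplies the LR by gamma=0.2 every 15 scheduler steps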
if config.schedule:
    # scheduler = L.CosineAnnealingLR(optim.optimizer, T_max=config.epoch)
    scheduler = L.StepLR(optim.optimizer, step_size=15, gamma=0.2)

# total number of parameters
param_count = 0
for param in model.parameters():
    param_count += param.view(-1).size()[0]

# logging module
if not os.path.exists(config.log):
    os.mkdir(config.log)
if opt.log == '':
    log_path = config.log + utils.format_time(time.localtime()) + '/'
else:
    log_path = config.log + opt.log + '/'
if not os.path.exists(log_path):
    os.mkdir(log_path)
print('log_path:', log_path)

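# TensorBoard summary writer rooted at the run's log directory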
writer = SummaryWriter(log_path)

logging = utils.logging(log_path + 'log.txt')  # standalone logging helper: each call both prints and appends to the log file
logging_csv = utils.logging_csv(log_path + 'record.csv')
for k, v in list(config.items()):
    logging("%s:\t%s\n" % (str(k), str(v)))
logging("\n")
logging(repr(model) + "\n\n")
Example #3
    # flag name '-label_dict_file' is inferred from the companion '-label_dict_test_file' argument; the original opening of this call is cut off in the listing
    parser.add_argument('-label_dict_file',
                        default='./data/data/target_label_dict.json',
                        type=str,
                        help="label_dict")
    parser.add_argument('-label_dict_test_file',
                        default='./data/data/test_label_dict.json',
                        type=str,
                        help="label_dict")

    opt = parser.parse_args()
    config = utils.read_config(opt.config)
    torch.manual_seed(opt.seed)

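    # create the log directory: timestamp-based (with ':' replaced by '_') unless -log names it explicitly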
    if not os.path.exists(config.log):
        os.mkdir(config.log)
    if opt.log == '':
        log_path = config.log + str(utils.format_time(
            time.localtime())).replace(':', '_') + '/'
    else:
        log_path = config.log + opt.log + '/'
    if not os.path.exists(log_path):
        os.mkdir(log_path)

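    # logging helper that both prints and appends to log.txt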
    logging = utils.logging(log_path + 'log.txt')

    # checkpoint
    if opt.restore:
        print('loading checkpoint...\n')

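        # map_location='cpu' loads the checkpoint onto the CPU even if it was saved from a GPU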
        checkpoints = torch.load(opt.restore, map_location='cpu')

    # cuda
    use_cuda = torch.cuda.is_available() and len(opt.gpus) > 0